if_bridge.c revision 1.189.4.3 1 /* $NetBSD: if_bridge.c,v 1.189.4.3 2025/05/15 17:58:18 martin Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79 * consider heterogenous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.189.4.3 2025/05/15 17:58:18 martin Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_inet.h"
87 #include "opt_net_mpsafe.h"
88 #endif /* _KERNEL_OPT */
89
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105 #include <sys/syslog.h>
106
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115 #include <net/ether_sw_offload.h>
116
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123 #include <netinet/ip6.h>
124 #include <netinet6/in6_var.h>
125 #include <netinet6/ip6_var.h>
126 #include <netinet6/ip6_private.h> /* XXX */
127
128 /*
129 * Size of the route hash table. Must be a power of two.
130 */
131 #ifndef BRIDGE_RTHASH_SIZE
132 #define BRIDGE_RTHASH_SIZE 1024
133 #endif
134
135 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
136
137 #include "carp.h"
138 #if NCARP > 0
139 #include <netinet/in.h>
140 #include <netinet/in_var.h>
141 #include <netinet/ip_carp.h>
142 #endif
143
144 #include "ioconf.h"
145
146 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
148 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
149
150 /*
151 * Maximum number of addresses to cache.
152 */
153 #ifndef BRIDGE_RTABLE_MAX
154 #define BRIDGE_RTABLE_MAX 100
155 #endif
156
157 /*
158 * Spanning tree defaults.
159 */
160 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
161 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
162 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
163 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
164 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
165 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
166 #define BSTP_DEFAULT_PATH_COST 55
167
168 /*
169 * Timeout (in seconds) for entries learned dynamically.
170 */
171 #ifndef BRIDGE_RTABLE_TIMEOUT
172 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
173 #endif
174
175 /*
176 * Number of seconds between walks of the route list.
177 */
178 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
179 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
180 #endif
181
182 #define BRIDGE_RT_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_lock)
183 #define BRIDGE_RT_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_lock)
184 #define BRIDGE_RT_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_lock)
185
186 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
187 pserialize_perform((_sc)->sc_rtlist_psz)
188
189 #define BRIDGE_RT_RENTER(__s) do { __s = pserialize_read_enter(); } while (0)
190 #define BRIDGE_RT_REXIT(__s) do { pserialize_read_exit(__s); } while (0)
191
192 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc) \
193 PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist), \
194 struct bridge_rtnode, brt_list)
195 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc) \
196 PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist), \
197 struct bridge_rtnode, brt_list)
198 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt) \
199 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
200 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt) \
201 PSLIST_WRITER_REMOVE((_brt), brt_list)
202
203 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash) \
204 PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
205 struct bridge_rtnode, brt_hash)
206 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash) \
207 PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
208 struct bridge_rtnode, brt_hash)
209 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt) \
210 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
211 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new) \
212 PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
213 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt) \
214 PSLIST_WRITER_REMOVE((_brt), brt_hash)
215
216 #ifdef NET_MPSAFE
217 #define DECLARE_LOCK_VARIABLE
218 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
219 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
220 #else
221 #define DECLARE_LOCK_VARIABLE int __s
222 #define ACQUIRE_GLOBAL_LOCKS() do { \
223 KERNEL_LOCK(1, NULL); \
224 mutex_enter(softnet_lock); \
225 __s = splsoftnet(); \
226 } while (0)
227 #define RELEASE_GLOBAL_LOCKS() do { \
228 splx(__s); \
229 mutex_exit(softnet_lock); \
230 KERNEL_UNLOCK_ONE(NULL); \
231 } while (0)
232 #endif
233
234 struct psref_class *bridge_psref_class __read_mostly;
235
236 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
237
238 static struct pool bridge_rtnode_pool;
239
240 static int bridge_clone_create(struct if_clone *, int);
241 static int bridge_clone_destroy(struct ifnet *);
242
243 static int bridge_ioctl(struct ifnet *, u_long, void *);
244 static int bridge_init(struct ifnet *);
245 static void bridge_stop(struct ifnet *, int);
246 static void bridge_start(struct ifnet *);
247 static void bridge_ifdetach(void *);
248
249 static void bridge_input(struct ifnet *, struct mbuf *);
250 static void bridge_forward(struct bridge_softc *, struct mbuf *);
251
252 static void bridge_timer(void *);
253
254 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, bool,
255 struct mbuf *);
256
257 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
258 struct ifnet *, int, uint8_t);
259 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
260 static void bridge_rttrim(struct bridge_softc *);
261 static void bridge_rtage(struct bridge_softc *);
262 static void bridge_rtage_work(struct work *, void *);
263 static void bridge_rtflush(struct bridge_softc *, int);
264 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
265 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
266
267 static void bridge_rtable_init(struct bridge_softc *);
268 static void bridge_rtable_fini(struct bridge_softc *);
269
270 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
271 const uint8_t *);
272 static int bridge_rtnode_insert(struct bridge_softc *,
273 struct bridge_rtnode *);
274 static void bridge_rtnode_remove(struct bridge_softc *,
275 struct bridge_rtnode *);
276 static void bridge_rtnode_destroy(struct bridge_rtnode *);
277
278 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
279 const char *name,
280 struct psref *);
281 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
282 struct ifnet *ifp,
283 struct psref *);
284 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
285 struct psref *);
286 static void bridge_delete_member(struct bridge_softc *,
287 struct bridge_iflist *);
288 static void bridge_acquire_member(struct bridge_softc *sc,
289 struct bridge_iflist *,
290 struct psref *);
291
292 static int bridge_ioctl_add(struct bridge_softc *, void *);
293 static int bridge_ioctl_del(struct bridge_softc *, void *);
294 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
295 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
296 static int bridge_ioctl_scache(struct bridge_softc *, void *);
297 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
298 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
299 static int bridge_ioctl_rts(struct bridge_softc *, void *);
300 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
301 static int bridge_ioctl_sto(struct bridge_softc *, void *);
302 static int bridge_ioctl_gto(struct bridge_softc *, void *);
303 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
304 static int bridge_ioctl_flush(struct bridge_softc *, void *);
305 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
306 static int bridge_ioctl_spri(struct bridge_softc *, void *);
307 static int bridge_ioctl_ght(struct bridge_softc *, void *);
308 static int bridge_ioctl_sht(struct bridge_softc *, void *);
309 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
310 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
311 static int bridge_ioctl_gma(struct bridge_softc *, void *);
312 static int bridge_ioctl_sma(struct bridge_softc *, void *);
313 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
314 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
315 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
316 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
317 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
318 static int bridge_ip_checkbasic(struct mbuf **mp);
319 # ifdef INET6
320 static int bridge_ip6_checkbasic(struct mbuf **mp);
321 # endif /* INET6 */
322
323 struct bridge_control {
324 int (*bc_func)(struct bridge_softc *, void *);
325 int bc_argsize;
326 int bc_flags;
327 };
328
329 #define BC_F_COPYIN 0x01 /* copy arguments in */
330 #define BC_F_COPYOUT 0x02 /* copy arguments out */
331 #define BC_F_SUSER 0x04 /* do super-user check */
332 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
333 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
334
335 static const struct bridge_control bridge_control_table[] = {
336 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
337 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
338
339 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
340 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
341
342 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
344
345 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
346 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
347
348 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
349
350 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
351 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
352
353 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
354
355 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
356
357 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
358 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
359
360 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
361 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
362
363 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
364 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
365
366 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
367 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
368
369 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
370
371 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
372
373 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
374 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
375
376 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
377 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
378 };
379
380 static const int bridge_control_table_size = __arraycount(bridge_control_table);
381
382 static struct if_clone bridge_cloner =
383 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
384
385 /*
386 * bridgeattach:
387 *
388 * Pseudo-device attach routine.
389 */
390 void
391 bridgeattach(int n)
392 {
393
394 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
395 0, 0, 0, "brtpl", NULL, IPL_NET);
396
397 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
398
399 if_clone_attach(&bridge_cloner);
400 }
401
402 /*
403 * bridge_clone_create:
404 *
405 * Create a new bridge instance.
406 */
407 static int
408 bridge_clone_create(struct if_clone *ifc, int unit)
409 {
410 struct bridge_softc *sc;
411 struct ifnet *ifp;
412 int error;
413
414 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
415 ifp = &sc->sc_if;
416
417 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
418 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
419 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
420 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
421 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
422 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
423 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
424 sc->sc_filter_flags = 0;
425
426 /* Initialize our routing table. */
427 bridge_rtable_init(sc);
428
429 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
430 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
431 if (error)
432 panic("%s: workqueue_create %d\n", __func__, error);
433
434 callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
435 callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
436
437 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
438 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
439 sc->sc_iflist_psref.bip_psz = pserialize_create();
440
441 if_initname(ifp, ifc->ifc_name, unit);
442 ifp->if_softc = sc;
443 #ifdef NET_MPSAFE
444 ifp->if_extflags = IFEF_MPSAFE;
445 #endif
446 ifp->if_mtu = ETHERMTU;
447 ifp->if_ioctl = bridge_ioctl;
448 ifp->if_output = bridge_output;
449 ifp->if_start = bridge_start;
450 ifp->if_stop = bridge_stop;
451 ifp->if_init = bridge_init;
452 ifp->if_type = IFT_BRIDGE;
453 ifp->if_addrlen = 0;
454 ifp->if_dlt = DLT_EN10MB;
455 ifp->if_hdrlen = ETHER_HDR_LEN;
456 if_initialize(ifp);
457
458 /*
459 * Set the link state to down.
460 * When interfaces are added the link state will reflect
461 * the best link state of the combined interfaces.
462 */
463 ifp->if_link_state = LINK_STATE_DOWN;
464
465 if_alloc_sadl(ifp);
466 if_register(ifp);
467
468 return 0;
469 }
470
471 /*
472 * bridge_clone_destroy:
473 *
474 * Destroy a bridge instance.
475 */
476 static int
477 bridge_clone_destroy(struct ifnet *ifp)
478 {
479 struct bridge_softc *sc = ifp->if_softc;
480 struct bridge_iflist *bif;
481
482 if ((ifp->if_flags & IFF_RUNNING) != 0)
483 bridge_stop(ifp, 1);
484
485 BRIDGE_LOCK(sc);
486 for (;;) {
487 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
488 bif_next);
489 if (bif == NULL)
490 break;
491 bridge_delete_member(sc, bif);
492 }
493 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
494 BRIDGE_UNLOCK(sc);
495
496 if_detach(ifp);
497
498 /* Tear down the routing table. */
499 bridge_rtable_fini(sc);
500
501 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
502 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
503 callout_destroy(&sc->sc_brcallout);
504 callout_destroy(&sc->sc_bstpcallout);
505 workqueue_destroy(sc->sc_rtage_wq);
506 kmem_free(sc, sizeof(*sc));
507
508 return 0;
509 }
510
511 /*
512 * bridge_ioctl:
513 *
514 * Handle a control request from the operator.
515 */
516 static int
517 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
518 {
519 struct bridge_softc *sc = ifp->if_softc;
520 struct lwp *l = curlwp; /* XXX */
521 union {
522 struct ifbreq ifbreq;
523 struct ifbifconf ifbifconf;
524 struct ifbareq ifbareq;
525 struct ifbaconf ifbaconf;
526 struct ifbrparam ifbrparam;
527 } args;
528 struct ifdrv *ifd = (struct ifdrv *) data;
529 const struct bridge_control *bc = NULL; /* XXXGCC */
530 int error = 0;
531
532 /* Authorize command before calling splsoftnet(). */
533 switch (cmd) {
534 case SIOCGDRVSPEC:
535 case SIOCSDRVSPEC:
536 if (ifd->ifd_cmd >= bridge_control_table_size
537 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
538 error = EINVAL;
539 return error;
540 }
541
542 /* We only care about BC_F_SUSER at this point. */
543 if ((bc->bc_flags & BC_F_SUSER) == 0)
544 break;
545
546 error = kauth_authorize_network(l->l_cred,
547 KAUTH_NETWORK_INTERFACE_BRIDGE,
548 cmd == SIOCGDRVSPEC ?
549 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
550 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
551 ifd, NULL, NULL);
552 if (error)
553 return error;
554
555 break;
556 }
557
558 const int s = splsoftnet();
559
560 switch (cmd) {
561 case SIOCGDRVSPEC:
562 case SIOCSDRVSPEC:
563 KASSERT(bc != NULL);
564 if (cmd == SIOCGDRVSPEC &&
565 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
566 error = EINVAL;
567 break;
568 }
569 else if (cmd == SIOCSDRVSPEC &&
570 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
571 error = EINVAL;
572 break;
573 }
574
575 /* BC_F_SUSER is checked above, before splsoftnet(). */
576
577 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
578 && (ifd->ifd_len != bc->bc_argsize
579 || ifd->ifd_len > sizeof(args))) {
580 error = EINVAL;
581 break;
582 }
583
584 memset(&args, 0, sizeof(args));
585 if (bc->bc_flags & BC_F_COPYIN) {
586 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
587 if (error)
588 break;
589 } else if (bc->bc_flags & BC_F_XLATEIN) {
590 args.ifbifconf.ifbic_len = ifd->ifd_len;
591 args.ifbifconf.ifbic_buf = ifd->ifd_data;
592 }
593
594 error = (*bc->bc_func)(sc, &args);
595 if (error)
596 break;
597
598 if (bc->bc_flags & BC_F_COPYOUT) {
599 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
600 } else if (bc->bc_flags & BC_F_XLATEOUT) {
601 ifd->ifd_len = args.ifbifconf.ifbic_len;
602 ifd->ifd_data = args.ifbifconf.ifbic_buf;
603 }
604 break;
605
606 case SIOCSIFFLAGS:
607 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
608 break;
609 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
610 case IFF_RUNNING:
611 /*
612 * If interface is marked down and it is running,
613 * then stop and disable it.
614 */
615 if_stop(ifp, 1);
616 break;
617 case IFF_UP:
618 /*
619 * If interface is marked up and it is stopped, then
620 * start it.
621 */
622 error = if_init(ifp);
623 break;
624 default:
625 break;
626 }
627 break;
628
629 case SIOCSIFMTU:
630 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
631 error = 0;
632 break;
633
634 case SIOCGIFCAP:
635 {
636 struct ifcapreq *ifcr = (struct ifcapreq *)data;
637 ifcr->ifcr_capabilities = sc->sc_capenable;
638 ifcr->ifcr_capenable = sc->sc_capenable;
639 break;
640 }
641
642 default:
643 error = ifioctl_common(ifp, cmd, data);
644 break;
645 }
646
647 splx(s);
648
649 return error;
650 }
651
652 /*
653 * bridge_lookup_member:
654 *
655 * Lookup a bridge member interface.
656 */
657 static struct bridge_iflist *
658 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
659 {
660 struct bridge_iflist *bif;
661 struct ifnet *ifp;
662 int s;
663
664 BRIDGE_PSZ_RENTER(s);
665
666 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
667 ifp = bif->bif_ifp;
668 if (strcmp(ifp->if_xname, name) == 0)
669 break;
670 }
671 if (bif != NULL)
672 bridge_acquire_member(sc, bif, psref);
673
674 BRIDGE_PSZ_REXIT(s);
675
676 return bif;
677 }
678
679 /*
680 * bridge_lookup_member_if:
681 *
682 * Lookup a bridge member interface by ifnet*.
683 */
684 static struct bridge_iflist *
685 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
686 struct psref *psref)
687 {
688 struct bridge_iflist *bif;
689 int s;
690
691 BRIDGE_PSZ_RENTER(s);
692
693 bif = member_ifp->if_bridgeif;
694 if (bif != NULL) {
695 psref_acquire(psref, &bif->bif_psref,
696 bridge_psref_class);
697 }
698
699 BRIDGE_PSZ_REXIT(s);
700
701 return bif;
702 }
703
704 static void
705 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
706 struct psref *psref)
707 {
708
709 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
710 }
711
712 /*
713 * bridge_release_member:
714 *
715 * Release the specified member interface.
716 */
717 static void
718 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
719 struct psref *psref)
720 {
721
722 psref_release(psref, &bif->bif_psref, bridge_psref_class);
723 }
724
725 /*
726 * bridge_delete_member:
727 *
728 * Delete the specified member interface.
729 */
730 static void
731 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
732 {
733 struct ifnet *ifs = bif->bif_ifp;
734
735 KASSERT(BRIDGE_LOCKED(sc));
736
737 ifs->_if_input = ether_input;
738 ifs->if_bridge = NULL;
739 ifs->if_bridgeif = NULL;
740
741 PSLIST_WRITER_REMOVE(bif, bif_next);
742 BRIDGE_PSZ_PERFORM(sc);
743
744 if_linkstate_change_disestablish(ifs,
745 bif->bif_linkstate_hook, BRIDGE_LOCK_OBJ(sc));
746 ether_ifdetachhook_disestablish(ifs,
747 bif->bif_ifdetach_hook, BRIDGE_LOCK_OBJ(sc));
748
749 BRIDGE_UNLOCK(sc);
750
751 switch (ifs->if_type) {
752 case IFT_ETHER:
753 case IFT_L2TP:
754 /*
755 * Take the interface out of promiscuous mode.
756 * Don't call it with holding a spin lock.
757 */
758 (void) ifpromisc(ifs, 0);
759 IFNET_LOCK(ifs);
760 (void) ether_disable_vlan_mtu(ifs);
761 IFNET_UNLOCK(ifs);
762 break;
763 default:
764 #ifdef DIAGNOSTIC
765 panic("%s: impossible", __func__);
766 #endif
767 break;
768 }
769
770 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
771
772 PSLIST_ENTRY_DESTROY(bif, bif_next);
773 kmem_free(bif, sizeof(*bif));
774
775 BRIDGE_LOCK(sc);
776 }
777
778 /*
779 * bridge_calc_csum_flags:
780 *
781 * Calculate logical and b/w csum flags each member interface supports.
782 */
783 void
784 bridge_calc_csum_flags(struct bridge_softc *sc)
785 {
786 struct bridge_iflist *bif;
787 struct ifnet *ifs = NULL;
788 int flags = ~0;
789 int capenable = ~0;
790
791 BRIDGE_LOCK(sc);
792 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
793 ifs = bif->bif_ifp;
794 flags &= ifs->if_csum_flags_tx;
795 capenable &= ifs->if_capenable;
796 }
797 sc->sc_csum_flags_tx = flags;
798 sc->sc_capenable = (ifs != NULL) ? capenable : 0;
799 BRIDGE_UNLOCK(sc);
800 }
801
802 /*
803 * bridge_calc_link_state:
804 *
805 * Calculate the link state based on each member interface.
806 */
807 static void
808 bridge_calc_link_state(void *xsc)
809 {
810 struct bridge_softc *sc = xsc;
811 struct bridge_iflist *bif;
812 struct ifnet *ifs;
813 int link_state = LINK_STATE_DOWN;
814
815 BRIDGE_LOCK(sc);
816 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
817 ifs = bif->bif_ifp;
818 if (ifs->if_link_state == LINK_STATE_UP) {
819 link_state = LINK_STATE_UP;
820 break;
821 }
822 if (ifs->if_link_state == LINK_STATE_UNKNOWN)
823 link_state = LINK_STATE_UNKNOWN;
824 }
825 if_link_state_change(&sc->sc_if, link_state);
826 BRIDGE_UNLOCK(sc);
827 }
828
829 static int
830 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
831 {
832 struct ifbreq *req = arg;
833 struct bridge_iflist *bif = NULL;
834 struct ifnet *ifs;
835 int error = 0;
836 struct psref psref;
837
838 ifs = if_get(req->ifbr_ifsname, &psref);
839 if (ifs == NULL)
840 return ENOENT;
841
842 if (ifs->if_bridge == sc) {
843 error = EEXIST;
844 goto out;
845 }
846
847 if (ifs->if_bridge != NULL) {
848 error = EBUSY;
849 goto out;
850 }
851
852 if (ifs->_if_input != ether_input) {
853 error = EINVAL;
854 goto out;
855 }
856
857 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
858 if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
859 error = EINVAL;
860 goto out;
861 }
862
863 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
864
865 switch (ifs->if_type) {
866 case IFT_ETHER:
867 if (sc->sc_if.if_mtu != ifs->if_mtu) {
868 /* Change MTU of added interface to bridge MTU */
869 struct ifreq ifr;
870 memset(&ifr, 0, sizeof(ifr));
871 ifr.ifr_mtu = sc->sc_if.if_mtu;
872 IFNET_LOCK(ifs);
873 error = if_ioctl(ifs, SIOCSIFMTU, &ifr);
874 IFNET_UNLOCK(ifs);
875 if (error != 0)
876 goto out;
877 }
878 /* FALLTHROUGH */
879 case IFT_L2TP:
880 IFNET_LOCK(ifs);
881 error = ether_enable_vlan_mtu(ifs);
882 IFNET_UNLOCK(ifs);
883 if (error > 0)
884 goto out;
885 /*
886 * Place the interface into promiscuous mode.
887 */
888 error = ifpromisc(ifs, 1);
889 if (error)
890 goto out;
891 break;
892 default:
893 error = EINVAL;
894 goto out;
895 }
896
897 bif->bif_ifp = ifs;
898 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
899 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
900 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
901 bif->bif_linkstate_hook = if_linkstate_change_establish(ifs,
902 bridge_calc_link_state, sc);
903 PSLIST_ENTRY_INIT(bif, bif_next);
904 psref_target_init(&bif->bif_psref, bridge_psref_class);
905
906 BRIDGE_LOCK(sc);
907
908 ifs->if_bridge = sc;
909 ifs->if_bridgeif = bif;
910 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
911 ifs->_if_input = bridge_input;
912
913 BRIDGE_UNLOCK(sc);
914
915 bif->bif_ifdetach_hook = ether_ifdetachhook_establish(ifs,
916 bridge_ifdetach, (void *)ifs);
917
918 bridge_calc_csum_flags(sc);
919 bridge_calc_link_state(sc);
920
921 if (sc->sc_if.if_flags & IFF_RUNNING)
922 bstp_initialization(sc);
923 else
924 bstp_stop(sc);
925
926 out:
927 if_put(ifs, &psref);
928 if (error) {
929 if (bif != NULL)
930 kmem_free(bif, sizeof(*bif));
931 }
932 return error;
933 }
934
935 static int
936 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
937 {
938 struct ifbreq *req = arg;
939 const char *name = req->ifbr_ifsname;
940 struct bridge_iflist *bif;
941 struct ifnet *ifs;
942
943 BRIDGE_LOCK(sc);
944
945 /*
946 * Don't use bridge_lookup_member. We want to get a member
947 * with bif_refs == 0.
948 */
949 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
950 ifs = bif->bif_ifp;
951 if (strcmp(ifs->if_xname, name) == 0)
952 break;
953 }
954
955 if (bif == NULL) {
956 BRIDGE_UNLOCK(sc);
957 return ENOENT;
958 }
959
960 bridge_delete_member(sc, bif);
961
962 BRIDGE_UNLOCK(sc);
963
964 bridge_rtdelete(sc, ifs);
965 bridge_calc_csum_flags(sc);
966 bridge_calc_link_state(sc);
967
968 if (sc->sc_if.if_flags & IFF_RUNNING)
969 bstp_initialization(sc);
970
971 return 0;
972 }
973
974 static int
975 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
976 {
977 struct ifbreq *req = arg;
978 struct bridge_iflist *bif;
979 struct psref psref;
980
981 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
982 if (bif == NULL)
983 return ENOENT;
984
985 req->ifbr_ifsflags = bif->bif_flags;
986 req->ifbr_state = bif->bif_state;
987 req->ifbr_priority = bif->bif_priority;
988 req->ifbr_path_cost = bif->bif_path_cost;
989 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
990
991 bridge_release_member(sc, bif, &psref);
992
993 return 0;
994 }
995
996 static int
997 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
998 {
999 struct ifbreq *req = arg;
1000 struct bridge_iflist *bif;
1001 struct psref psref;
1002
1003 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1004 if (bif == NULL)
1005 return ENOENT;
1006
1007 if (req->ifbr_ifsflags & IFBIF_STP) {
1008 switch (bif->bif_ifp->if_type) {
1009 case IFT_ETHER:
1010 case IFT_L2TP:
1011 /* These can do spanning tree. */
1012 break;
1013
1014 default:
1015 /* Nothing else can. */
1016 bridge_release_member(sc, bif, &psref);
1017 return EINVAL;
1018 }
1019 }
1020
1021 if (bif->bif_flags & IFBIF_PROTECTED) {
1022 if ((req->ifbr_ifsflags & IFBIF_PROTECTED) == 0) {
1023 log(LOG_INFO, "%s: disabling protection on %s\n",
1024 sc->sc_if.if_xname, bif->bif_ifp->if_xname);
1025 }
1026 } else {
1027 if (req->ifbr_ifsflags & IFBIF_PROTECTED) {
1028 log(LOG_INFO, "%s: enabling protection on %s\n",
1029 sc->sc_if.if_xname, bif->bif_ifp->if_xname);
1030 }
1031 }
1032
1033 bif->bif_flags = req->ifbr_ifsflags;
1034
1035 bridge_release_member(sc, bif, &psref);
1036
1037 if (sc->sc_if.if_flags & IFF_RUNNING)
1038 bstp_initialization(sc);
1039
1040 return 0;
1041 }
1042
1043 static int
1044 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1045 {
1046 struct ifbrparam *param = arg;
1047
1048 sc->sc_brtmax = param->ifbrp_csize;
1049 bridge_rttrim(sc);
1050
1051 return 0;
1052 }
1053
1054 static int
1055 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1056 {
1057 struct ifbrparam *param = arg;
1058
1059 param->ifbrp_csize = sc->sc_brtmax;
1060
1061 return 0;
1062 }
1063
1064 static int
1065 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1066 {
1067 struct ifbifconf *bifc = arg;
1068 struct bridge_iflist *bif;
1069 struct ifbreq *breqs;
1070 int i, count, error = 0;
1071
1072 retry:
1073 BRIDGE_LOCK(sc);
1074 count = 0;
1075 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1076 count++;
1077 BRIDGE_UNLOCK(sc);
1078
1079 if (count == 0) {
1080 bifc->ifbic_len = 0;
1081 return 0;
1082 }
1083
1084 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1085 /* Tell that a larger buffer is needed */
1086 bifc->ifbic_len = sizeof(*breqs) * count;
1087 return 0;
1088 }
1089
1090 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1091
1092 BRIDGE_LOCK(sc);
1093
1094 i = 0;
1095 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1096 i++;
1097 if (i > count) {
1098 /*
1099 * The number of members has been increased.
1100 * We need more memory!
1101 */
1102 BRIDGE_UNLOCK(sc);
1103 kmem_free(breqs, sizeof(*breqs) * count);
1104 goto retry;
1105 }
1106
1107 i = 0;
1108 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1109 struct ifbreq *breq = &breqs[i++];
1110 memset(breq, 0, sizeof(*breq));
1111
1112 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1113 sizeof(breq->ifbr_ifsname));
1114 breq->ifbr_ifsflags = bif->bif_flags;
1115 breq->ifbr_state = bif->bif_state;
1116 breq->ifbr_priority = bif->bif_priority;
1117 breq->ifbr_path_cost = bif->bif_path_cost;
1118 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1119 }
1120
1121 /* Don't call copyout with holding the mutex */
1122 BRIDGE_UNLOCK(sc);
1123
1124 for (i = 0; i < count; i++) {
1125 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1126 if (error)
1127 break;
1128 }
1129 bifc->ifbic_len = sizeof(*breqs) * i;
1130
1131 kmem_free(breqs, sizeof(*breqs) * count);
1132
1133 return error;
1134 }
1135
1136 static int
1137 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1138 {
1139 struct ifbaconf *bac = arg;
1140 struct bridge_rtnode *brt;
1141 struct ifbareq bareq;
1142 int count = 0, error = 0, len;
1143
1144 if (bac->ifbac_len == 0)
1145 return 0;
1146
1147 BRIDGE_RT_LOCK(sc);
1148
1149 /* The passed buffer is not enough, tell a required size. */
1150 if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1151 count = sc->sc_brtcnt;
1152 goto out;
1153 }
1154
1155 len = bac->ifbac_len;
1156 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1157 if (len < sizeof(bareq))
1158 goto out;
1159 memset(&bareq, 0, sizeof(bareq));
1160 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1161 sizeof(bareq.ifba_ifsname));
1162 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1163 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1164 bareq.ifba_expire = brt->brt_expire - time_uptime;
1165 } else
1166 bareq.ifba_expire = 0;
1167 bareq.ifba_flags = brt->brt_flags;
1168
1169 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1170 if (error)
1171 goto out;
1172 count++;
1173 len -= sizeof(bareq);
1174 }
1175 out:
1176 BRIDGE_RT_UNLOCK(sc);
1177
1178 bac->ifbac_len = sizeof(bareq) * count;
1179 return error;
1180 }
1181
1182 static int
1183 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1184 {
1185 struct ifbareq *req = arg;
1186 struct bridge_iflist *bif;
1187 int error;
1188 struct psref psref;
1189
1190 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1191 if (bif == NULL)
1192 return ENOENT;
1193
1194 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1195 req->ifba_flags);
1196
1197 bridge_release_member(sc, bif, &psref);
1198
1199 return error;
1200 }
1201
1202 static int
1203 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1204 {
1205 struct ifbrparam *param = arg;
1206
1207 sc->sc_brttimeout = param->ifbrp_ctime;
1208
1209 return 0;
1210 }
1211
1212 static int
1213 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1214 {
1215 struct ifbrparam *param = arg;
1216
1217 param->ifbrp_ctime = sc->sc_brttimeout;
1218
1219 return 0;
1220 }
1221
1222 static int
1223 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1224 {
1225 struct ifbareq *req = arg;
1226
1227 return (bridge_rtdaddr(sc, req->ifba_dst));
1228 }
1229
1230 static int
1231 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1232 {
1233 struct ifbreq *req = arg;
1234
1235 bridge_rtflush(sc, req->ifbr_ifsflags);
1236
1237 return 0;
1238 }
1239
1240 static int
1241 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1242 {
1243 struct ifbrparam *param = arg;
1244
1245 param->ifbrp_prio = sc->sc_bridge_priority;
1246
1247 return 0;
1248 }
1249
1250 static int
1251 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1252 {
1253 struct ifbrparam *param = arg;
1254
1255 sc->sc_bridge_priority = param->ifbrp_prio;
1256
1257 if (sc->sc_if.if_flags & IFF_RUNNING)
1258 bstp_initialization(sc);
1259
1260 return 0;
1261 }
1262
1263 static int
1264 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1265 {
1266 struct ifbrparam *param = arg;
1267
1268 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1269
1270 return 0;
1271 }
1272
1273 static int
1274 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1275 {
1276 struct ifbrparam *param = arg;
1277
1278 if (param->ifbrp_hellotime == 0)
1279 return EINVAL;
1280 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1281
1282 if (sc->sc_if.if_flags & IFF_RUNNING)
1283 bstp_initialization(sc);
1284
1285 return 0;
1286 }
1287
1288 static int
1289 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1290 {
1291 struct ifbrparam *param = arg;
1292
1293 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1294
1295 return 0;
1296 }
1297
1298 static int
1299 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1300 {
1301 struct ifbrparam *param = arg;
1302
1303 if (param->ifbrp_fwddelay == 0)
1304 return EINVAL;
1305 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1306
1307 if (sc->sc_if.if_flags & IFF_RUNNING)
1308 bstp_initialization(sc);
1309
1310 return 0;
1311 }
1312
1313 static int
1314 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1315 {
1316 struct ifbrparam *param = arg;
1317
1318 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1319
1320 return 0;
1321 }
1322
1323 static int
1324 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1325 {
1326 struct ifbrparam *param = arg;
1327
1328 if (param->ifbrp_maxage == 0)
1329 return EINVAL;
1330 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1331
1332 if (sc->sc_if.if_flags & IFF_RUNNING)
1333 bstp_initialization(sc);
1334
1335 return 0;
1336 }
1337
1338 static int
1339 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1340 {
1341 struct ifbreq *req = arg;
1342 struct bridge_iflist *bif;
1343 struct psref psref;
1344
1345 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1346 if (bif == NULL)
1347 return ENOENT;
1348
1349 bif->bif_priority = req->ifbr_priority;
1350
1351 if (sc->sc_if.if_flags & IFF_RUNNING)
1352 bstp_initialization(sc);
1353
1354 bridge_release_member(sc, bif, &psref);
1355
1356 return 0;
1357 }
1358
1359 static int
1360 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1361 {
1362 struct ifbrparam *param = arg;
1363
1364 param->ifbrp_filter = sc->sc_filter_flags;
1365
1366 return 0;
1367 }
1368
1369 static int
1370 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1371 {
1372 struct ifbrparam *param = arg;
1373 uint32_t nflags, oflags;
1374
1375 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1376 return EINVAL;
1377
1378 nflags = param->ifbrp_filter;
1379 oflags = sc->sc_filter_flags;
1380
1381 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1382 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1383 sc->sc_if.if_pfil);
1384 }
1385 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1386 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1387 sc->sc_if.if_pfil);
1388 }
1389
1390 sc->sc_filter_flags = nflags;
1391
1392 return 0;
1393 }
1394
1395 static int
1396 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1397 {
1398 struct ifbreq *req = arg;
1399 struct bridge_iflist *bif;
1400 struct psref psref;
1401
1402 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1403 if (bif == NULL)
1404 return ENOENT;
1405
1406 bif->bif_path_cost = req->ifbr_path_cost;
1407
1408 if (sc->sc_if.if_flags & IFF_RUNNING)
1409 bstp_initialization(sc);
1410
1411 bridge_release_member(sc, bif, &psref);
1412
1413 return 0;
1414 }
1415
1416 /*
1417 * bridge_ifdetach:
1418 *
1419 * Detach an interface from a bridge. Called when a member
1420 * interface is detaching.
1421 */
1422 static void
1423 bridge_ifdetach(void *xifs)
1424 {
1425 struct ifnet *ifs;
1426 struct bridge_softc *sc;
1427 struct ifbreq breq;
1428
1429 ifs = (struct ifnet *)xifs;
1430 sc = ifs->if_bridge;
1431
1432 /* ioctl_lock should prevent this from happening */
1433 KASSERT(sc != NULL);
1434
1435 memset(&breq, 0, sizeof(breq));
1436 strlcpy(breq.ifbr_ifsname, ifs->if_xname, sizeof(breq.ifbr_ifsname));
1437
1438 (void) bridge_ioctl_del(sc, &breq);
1439 }
1440
1441 /*
1442 * bridge_init:
1443 *
1444 * Initialize a bridge interface.
1445 */
1446 static int
1447 bridge_init(struct ifnet *ifp)
1448 {
1449 struct bridge_softc *sc = ifp->if_softc;
1450
1451 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1452
1453 BRIDGE_LOCK(sc);
1454 sc->sc_stopping = false;
1455 BRIDGE_UNLOCK(sc);
1456
1457 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1458 bridge_timer, sc);
1459 bstp_initialization(sc);
1460
1461 ifp->if_flags |= IFF_RUNNING;
1462 return 0;
1463 }
1464
1465 /*
1466 * bridge_stop:
1467 *
1468 * Stop the bridge interface.
1469 */
1470 static void
1471 bridge_stop(struct ifnet *ifp, int disable)
1472 {
1473 struct bridge_softc *sc = ifp->if_softc;
1474
1475 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1476 ifp->if_flags &= ~IFF_RUNNING;
1477
1478 BRIDGE_LOCK(sc);
1479 sc->sc_stopping = true;
1480 BRIDGE_UNLOCK(sc);
1481
1482 callout_halt(&sc->sc_brcallout, NULL);
1483 workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1484 bstp_stop(sc);
1485 bridge_rtflush(sc, IFBF_FLUSHDYN);
1486 }
1487
1488 /*
1489 * bridge_enqueue:
1490 *
1491 * Enqueue a packet on a bridge member interface.
1492 */
1493 void
1494 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1495 int runfilt)
1496 {
1497 int len, error;
1498 short mflags;
1499
1500 if (runfilt) {
1501 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1502 dst_ifp, PFIL_OUT) != 0) {
1503 if (m != NULL)
1504 m_freem(m);
1505 return;
1506 }
1507 if (m == NULL)
1508 return;
1509 }
1510
1511 #ifdef ALTQ
1512 KERNEL_LOCK(1, NULL);
1513 /*
1514 * If ALTQ is enabled on the member interface, do
1515 * classification; the queueing discipline might
1516 * not require classification, but might require
1517 * the address family/header pointer in the pktattr.
1518 */
1519 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1520 /* XXX IFT_ETHER */
1521 altq_etherclassify(&dst_ifp->if_snd, m);
1522 }
1523 KERNEL_UNLOCK_ONE(NULL);
1524 #endif /* ALTQ */
1525
1526 if (vlan_has_tag(m) &&
1527 !vlan_is_hwtag_enabled(dst_ifp)) {
1528 (void)ether_inject_vlantag(&m, ETHERTYPE_VLAN,
1529 vlan_get_tag(m));
1530 if (m == NULL) {
1531 if_statinc(&sc->sc_if, if_oerrors);
1532 return;
1533 }
1534 }
1535
1536 len = m->m_pkthdr.len;
1537 mflags = m->m_flags;
1538
1539 error = if_transmit_lock(dst_ifp, m);
1540 if (error) {
1541 /* mbuf is already freed */
1542 if_statinc(&sc->sc_if, if_oerrors);
1543 return;
1544 }
1545
1546 net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1547 if_statinc_ref(nsr, if_opackets);
1548 if_statadd_ref(nsr, if_obytes, len);
1549 if (mflags & M_MCAST)
1550 if_statinc_ref(nsr, if_omcasts);
1551 IF_STAT_PUTREF(&sc->sc_if);
1552 }
1553
1554 /*
1555 * bridge_output:
1556 *
1557 * Send output from a bridge member interface. This
1558 * performs the bridging function for locally originated
1559 * packets.
1560 *
1561 * The mbuf has the Ethernet header already attached. We must
1562 * enqueue or free the mbuf before returning.
1563 */
1564 int
1565 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1566 const struct rtentry *rt)
1567 {
1568 struct ether_header *eh;
1569 struct ifnet *dst_if;
1570 struct bridge_softc *sc;
1571 struct mbuf *n;
1572 int s, bound;
1573
1574 /*
1575 * bridge_output() is called from ether_output(), furthermore
1576 * ifp argument doesn't point to bridge(4). So, don't assert
1577 * IFEF_MPSAFE here.
1578 */
1579
1580 KASSERT(m->m_len >= ETHER_HDR_LEN);
1581
1582 eh = mtod(m, struct ether_header *);
1583 sc = ifp->if_bridge;
1584
1585 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1586 if (memcmp(etherbroadcastaddr,
1587 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1588 m->m_flags |= M_BCAST;
1589 else
1590 m->m_flags |= M_MCAST;
1591 }
1592
1593 /*
1594 * If bridge is down, but the original output interface is up,
1595 * go ahead and send out that interface. Otherwise, the packet
1596 * is dropped below.
1597 */
1598 if (__predict_false(sc == NULL) ||
1599 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1600 dst_if = ifp;
1601 goto unicast_asis;
1602 }
1603
1604 /*
1605 * If the packet is a multicast, or we don't know a better way to
1606 * get there, send to all interfaces.
1607 */
1608 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1609 dst_if = NULL;
1610 else
1611 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1612
1613 /*
1614 * In general, we need to handle TX offload in software before
1615 * enqueueing a packet. However, we can send it as is in the
1616 * cases of unicast via (1) the source interface, or (2) an
1617 * interface which supports the specified offload options.
1618 * For multicast or broadcast, send it as is only if (3) all
1619 * the member interfaces support the specified options.
1620 */
1621
1622 /*
1623 * Unicast via the source interface.
1624 */
1625 if (dst_if == ifp)
1626 goto unicast_asis;
1627
1628 /*
1629 * Unicast via other interface.
1630 */
1631 if (dst_if != NULL) {
1632 KASSERT(m->m_flags & M_PKTHDR);
1633 if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1634 m->m_pkthdr.csum_flags)) {
1635 /*
1636 * Unicast via an interface which supports the
1637 * specified offload options.
1638 */
1639 goto unicast_asis;
1640 }
1641
1642 /*
1643 * Handle TX offload in software. For TSO, a packet is
1644 * split into multiple chunks. Thus, the return value of
1645 * ether_sw_offload_tx() is mbuf queue consists of them.
1646 */
1647 m = ether_sw_offload_tx(ifp, m);
1648 if (m == NULL)
1649 return 0;
1650
1651 do {
1652 n = m->m_nextpkt;
1653 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1654 m_freem(m);
1655 else
1656 bridge_enqueue(sc, dst_if, m, 0);
1657 m = n;
1658 } while (m != NULL);
1659
1660 return 0;
1661 }
1662
1663 /*
1664 * Multicast or broadcast.
1665 */
1666 if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1667 m->m_pkthdr.csum_flags)) {
1668 /*
1669 * Specified TX offload options are supported by all
1670 * the member interfaces of this bridge.
1671 */
1672 m->m_nextpkt = NULL; /* XXX */
1673 } else {
1674 /*
1675 * Otherwise, handle TX offload in software.
1676 */
1677 m = ether_sw_offload_tx(ifp, m);
1678 if (m == NULL)
1679 return 0;
1680 }
1681
1682 /*
1683 * When we use pppoe over bridge, bridge_output() can be called
1684 * in a lwp context by pppoe_timeout_wk().
1685 */
1686 bound = curlwp_bind();
1687 do {
1688 /* XXX Should call bridge_broadcast, but there are locking
1689 * issues which need resolving first. */
1690 struct bridge_iflist *bif;
1691 struct mbuf *mc;
1692 bool used = false;
1693
1694 n = m->m_nextpkt;
1695
1696 BRIDGE_PSZ_RENTER(s);
1697 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1698 struct psref psref;
1699
1700 bridge_acquire_member(sc, bif, &psref);
1701 BRIDGE_PSZ_REXIT(s);
1702
1703 dst_if = bif->bif_ifp;
1704 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1705 goto next;
1706
1707 /*
1708 * If this is not the original output interface,
1709 * and the interface is participating in spanning
1710 * tree, make sure the port is in a state that
1711 * allows forwarding.
1712 */
1713 if (dst_if != ifp &&
1714 (bif->bif_flags & IFBIF_STP) != 0) {
1715 switch (bif->bif_state) {
1716 case BSTP_IFSTATE_BLOCKING:
1717 case BSTP_IFSTATE_LISTENING:
1718 case BSTP_IFSTATE_DISABLED:
1719 goto next;
1720 }
1721 }
1722
1723 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1724 bif_next) == NULL &&
1725 ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1726 dst_if == ifp))
1727 {
1728 used = true;
1729 mc = m;
1730 } else {
1731 mc = m_copypacket(m, M_DONTWAIT);
1732 if (mc == NULL) {
1733 if_statinc(&sc->sc_if, if_oerrors);
1734 goto next;
1735 }
1736 }
1737
1738 bridge_enqueue(sc, dst_if, mc, 0);
1739
1740 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1741 dst_if != ifp)
1742 {
1743 if (PSLIST_READER_NEXT(bif,
1744 struct bridge_iflist, bif_next) == NULL)
1745 {
1746 used = true;
1747 mc = m;
1748 } else {
1749 mc = m_copypacket(m, M_DONTWAIT);
1750 if (mc == NULL) {
1751 if_statinc(&sc->sc_if,
1752 if_oerrors);
1753 goto next;
1754 }
1755 }
1756
1757 m_set_rcvif(mc, dst_if);
1758 mc->m_flags &= ~M_PROMISC;
1759
1760 const int _s = splsoftnet();
1761 KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1762 ether_input(dst_if, mc);
1763 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1764 splx(_s);
1765 }
1766
1767 next:
1768 BRIDGE_PSZ_RENTER(s);
1769 bridge_release_member(sc, bif, &psref);
1770
1771 /* Guarantee we don't re-enter the loop as we already
1772 * decided we're at the end. */
1773 if (used)
1774 break;
1775 }
1776 BRIDGE_PSZ_REXIT(s);
1777
1778 if (!used)
1779 m_freem(m);
1780
1781 m = n;
1782 } while (m != NULL);
1783 curlwp_bindx(bound);
1784
1785 return 0;
1786
1787 unicast_asis:
1788 /*
1789 * XXX Spanning tree consideration here?
1790 */
1791 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1792 m_freem(m);
1793 else
1794 bridge_enqueue(sc, dst_if, m, 0);
1795 return 0;
1796 }
1797
1798 /*
1799 * bridge_start:
1800 *
1801 * Start output on a bridge.
1802 *
1803 * NOTE: This routine should never be called in this implementation.
1804 */
1805 static void
1806 bridge_start(struct ifnet *ifp)
1807 {
1808
1809 printf("%s: bridge_start() called\n", ifp->if_xname);
1810 }
1811
1812 /*
1813 * bridge_forward:
1814 *
1815 * The forwarding function of the bridge.
1816 */
1817 static void
1818 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1819 {
1820 struct bridge_iflist *bif;
1821 struct ifnet *src_if, *dst_if;
1822 struct ether_header *eh;
1823 struct psref psref;
1824 struct psref psref_src;
1825 DECLARE_LOCK_VARIABLE;
1826 bool src_if_protected;
1827
1828 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1829 return;
1830
1831 src_if = m_get_rcvif_psref(m, &psref_src);
1832 if (src_if == NULL) {
1833 /* Interface is being destroyed? */
1834 m_freem(m);
1835 goto out;
1836 }
1837
1838 if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1839
1840 /*
1841 * Look up the bridge_iflist.
1842 */
1843 bif = bridge_lookup_member_if(sc, src_if, &psref);
1844 if (bif == NULL) {
1845 /* Interface is not a bridge member (anymore?) */
1846 m_freem(m);
1847 goto out;
1848 }
1849
1850 if (bif->bif_flags & IFBIF_STP) {
1851 switch (bif->bif_state) {
1852 case BSTP_IFSTATE_BLOCKING:
1853 case BSTP_IFSTATE_LISTENING:
1854 case BSTP_IFSTATE_DISABLED:
1855 m_freem(m);
1856 bridge_release_member(sc, bif, &psref);
1857 goto out;
1858 }
1859 }
1860
1861 eh = mtod(m, struct ether_header *);
1862
1863 /*
1864 * If the interface is learning, and the source
1865 * address is valid and not multicast, record
1866 * the address.
1867 */
1868 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1869 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1870 (eh->ether_shost[0] == 0 &&
1871 eh->ether_shost[1] == 0 &&
1872 eh->ether_shost[2] == 0 &&
1873 eh->ether_shost[3] == 0 &&
1874 eh->ether_shost[4] == 0 &&
1875 eh->ether_shost[5] == 0) == 0) {
1876 (void) bridge_rtupdate(sc, eh->ether_shost,
1877 src_if, 0, IFBAF_DYNAMIC);
1878 }
1879
1880 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1881 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1882 m_freem(m);
1883 bridge_release_member(sc, bif, &psref);
1884 goto out;
1885 }
1886
1887 src_if_protected = ((bif->bif_flags & IFBIF_PROTECTED) != 0);
1888
1889 bridge_release_member(sc, bif, &psref);
1890
1891 /*
1892 * At this point, the port either doesn't participate
1893 * in spanning tree or it is in the forwarding state.
1894 */
1895
1896 /*
1897 * If the packet is unicast, destined for someone on
1898 * "this" side of the bridge, drop it.
1899 */
1900 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1901 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1902 if (src_if == dst_if) {
1903 m_freem(m);
1904 goto out;
1905 }
1906 } else {
1907 /* ...forward it to all interfaces. */
1908 if_statinc(&sc->sc_if, if_imcasts);
1909 dst_if = NULL;
1910 }
1911
1912 if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1913 if (m != NULL)
1914 m_freem(m);
1915 goto out;
1916 }
1917 if (m == NULL)
1918 goto out;
1919
1920 if (dst_if == NULL) {
1921 bridge_broadcast(sc, src_if, src_if_protected, m);
1922 goto out;
1923 }
1924
1925 m_put_rcvif_psref(src_if, &psref_src);
1926 src_if = NULL;
1927
1928 /*
1929 * At this point, we're dealing with a unicast frame
1930 * going to a different interface.
1931 */
1932 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1933 m_freem(m);
1934 goto out;
1935 }
1936
1937 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1938 if (bif == NULL) {
1939 /* Not a member of the bridge (anymore?) */
1940 m_freem(m);
1941 goto out;
1942 }
1943
1944 if (bif->bif_flags & IFBIF_STP) {
1945 switch (bif->bif_state) {
1946 case BSTP_IFSTATE_DISABLED:
1947 case BSTP_IFSTATE_BLOCKING:
1948 m_freem(m);
1949 bridge_release_member(sc, bif, &psref);
1950 goto out;
1951 }
1952 }
1953
1954 if ((bif->bif_flags & IFBIF_PROTECTED) && src_if_protected) {
1955 m_freem(m);
1956 bridge_release_member(sc, bif, &psref);
1957 goto out;
1958 }
1959
1960 bridge_release_member(sc, bif, &psref);
1961
1962 /*
1963 * Before enqueueing this packet to the destination interface,
1964 * clear any in-bound checksum flags to prevent them from being
1965 * misused as out-bound flags.
1966 */
1967 m->m_pkthdr.csum_flags = 0;
1968
1969 ACQUIRE_GLOBAL_LOCKS();
1970 bridge_enqueue(sc, dst_if, m, 1);
1971 RELEASE_GLOBAL_LOCKS();
1972 out:
1973 if (src_if != NULL)
1974 m_put_rcvif_psref(src_if, &psref_src);
1975 return;
1976 }
1977
1978 static bool
1979 bstp_state_before_learning(struct bridge_iflist *bif)
1980 {
1981 if (bif->bif_flags & IFBIF_STP) {
1982 switch (bif->bif_state) {
1983 case BSTP_IFSTATE_BLOCKING:
1984 case BSTP_IFSTATE_LISTENING:
1985 case BSTP_IFSTATE_DISABLED:
1986 return true;
1987 }
1988 }
1989 return false;
1990 }
1991
1992 static bool
1993 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1994 {
1995 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1996
1997 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1998 #if NCARP > 0
1999 || (bif->bif_ifp->if_carp &&
2000 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
2001 #endif /* NCARP > 0 */
2002 )
2003 return true;
2004
2005 return false;
2006 }
2007
2008 /*
2009 * bridge_input:
2010 *
2011 * Receive input from a member interface. Queue the packet for
2012 * bridging if it is not for us.
2013 */
2014 static void
2015 bridge_input(struct ifnet *ifp, struct mbuf *m)
2016 {
2017 struct bridge_softc *sc = ifp->if_bridge;
2018 struct bridge_iflist *bif;
2019 struct ether_header *eh;
2020 struct psref psref;
2021 int bound;
2022 DECLARE_LOCK_VARIABLE;
2023
2024 KASSERT(!cpu_intr_p());
2025
2026 if (__predict_false(sc == NULL) ||
2027 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
2028 ACQUIRE_GLOBAL_LOCKS();
2029 ether_input(ifp, m);
2030 RELEASE_GLOBAL_LOCKS();
2031 return;
2032 }
2033
2034 bound = curlwp_bind();
2035 bif = bridge_lookup_member_if(sc, ifp, &psref);
2036 if (bif == NULL) {
2037 curlwp_bindx(bound);
2038 ACQUIRE_GLOBAL_LOCKS();
2039 ether_input(ifp, m);
2040 RELEASE_GLOBAL_LOCKS();
2041 return;
2042 }
2043
2044 eh = mtod(m, struct ether_header *);
2045
2046 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
2047 if (memcmp(etherbroadcastaddr,
2048 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
2049 m->m_flags |= M_BCAST;
2050 else
2051 m->m_flags |= M_MCAST;
2052 }
2053
2054 /*
2055 * A 'fast' path for packets addressed to interfaces that are
2056 * part of this bridge.
2057 */
2058 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
2059 !bstp_state_before_learning(bif)) {
2060 struct bridge_iflist *_bif;
2061 struct ifnet *_ifp = NULL;
2062 int s;
2063 struct psref _psref;
2064
2065 BRIDGE_PSZ_RENTER(s);
2066 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
2067 /* It is destined for us. */
2068 if (bridge_ourether(_bif, eh, 0)) {
2069 bridge_acquire_member(sc, _bif, &_psref);
2070 BRIDGE_PSZ_REXIT(s);
2071 if (_bif->bif_flags & IFBIF_LEARNING)
2072 (void) bridge_rtupdate(sc,
2073 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2074 m_set_rcvif(m, _bif->bif_ifp);
2075 _ifp = _bif->bif_ifp;
2076 bridge_release_member(sc, _bif, &_psref);
2077 goto out;
2078 }
2079
2080 /* We just received a packet that we sent out. */
2081 if (bridge_ourether(_bif, eh, 1))
2082 break;
2083 }
2084 BRIDGE_PSZ_REXIT(s);
2085 out:
2086
2087 if (_bif != NULL) {
2088 bridge_release_member(sc, bif, &psref);
2089 curlwp_bindx(bound);
2090 if (_ifp != NULL) {
2091 m->m_flags &= ~M_PROMISC;
2092 ACQUIRE_GLOBAL_LOCKS();
2093 ether_input(_ifp, m);
2094 RELEASE_GLOBAL_LOCKS();
2095 } else
2096 m_freem(m);
2097 return;
2098 }
2099 }
2100
2101 /* Tap off 802.1D packets; they do not get forwarded. */
2102 if (bif->bif_flags & IFBIF_STP &&
2103 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2104 bstp_input(sc, bif, m);
2105 bridge_release_member(sc, bif, &psref);
2106 curlwp_bindx(bound);
2107 return;
2108 }
2109
2110 /*
2111 * A normal switch would discard the packet here, but that's not what
2112 * we've done historically. This also prevents some obnoxious behaviour.
2113 */
2114 if (bstp_state_before_learning(bif)) {
2115 bridge_release_member(sc, bif, &psref);
2116 curlwp_bindx(bound);
2117 ACQUIRE_GLOBAL_LOCKS();
2118 ether_input(ifp, m);
2119 RELEASE_GLOBAL_LOCKS();
2120 return;
2121 }
2122
2123 bridge_release_member(sc, bif, &psref);
2124
2125 bridge_forward(sc, m);
2126
2127 curlwp_bindx(bound);
2128 }
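
/*
 * The member-reference pattern used by bridge_input() above (and by most
 * of the data path in this file) is, in outline, the following.  This is
 * an illustrative sketch only, using the same helpers bridge_input()
 * uses:
 *
 *	struct bridge_iflist *bif;
 *	struct psref psref;
 *	int bound;
 *
 *	bound = curlwp_bind();	(bind the LWP, required for psref(9))
 *	bif = bridge_lookup_member_if(sc, ifp, &psref);
 *	if (bif != NULL) {
 *		(use bif->bif_ifp, bif->bif_flags, bif->bif_state, ...)
 *		bridge_release_member(sc, bif, &psref);
 *	}
 *	curlwp_bindx(bound);
 *
 * The psref(9) reference keeps the member from being freed while it is
 * in use, without taking a lock on the hot path.
 */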
2129
2130 /*
2131 * bridge_broadcast:
2132 *
2133 * Send a frame to all interfaces that are members of
2134 * the bridge, except for the one on which the packet
2135 * arrived.
2136 */
2137 static void
2138 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2139 bool src_if_protected, struct mbuf *m)
2140 {
2141 struct bridge_iflist *bif;
2142 struct mbuf *mc;
2143 struct ifnet *dst_if;
2144 bool bmcast;
2145 int s;
2146 DECLARE_LOCK_VARIABLE;
2147
2148 bmcast = m->m_flags & (M_BCAST|M_MCAST);
2149
2150 BRIDGE_PSZ_RENTER(s);
2151 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2152 struct psref psref;
2153
2154 bridge_acquire_member(sc, bif, &psref);
2155 BRIDGE_PSZ_REXIT(s);
2156
2157 dst_if = bif->bif_ifp;
2158
2159 if (bif->bif_flags & IFBIF_STP) {
2160 switch (bif->bif_state) {
2161 case BSTP_IFSTATE_BLOCKING:
2162 case BSTP_IFSTATE_DISABLED:
2163 goto next;
2164 }
2165 }
2166
2167 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2168 goto next;
2169
2170 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2171 goto next;
2172
2173 if (dst_if != src_if) {
2174 if ((bif->bif_flags & IFBIF_PROTECTED) &&
2175 src_if_protected) {
2176 goto next;
2177 }
2178
2179 mc = m_copypacket(m, M_DONTWAIT);
2180 if (mc == NULL) {
2181 if_statinc(&sc->sc_if, if_oerrors);
2182 goto next;
2183 }
2184 /*
2185 * Before enqueueing this packet to the destination
2186 * interface, clear any in-bound checksum flags to
2187 * prevent them from being misused as out-bound flags.
2188 */
2189 mc->m_pkthdr.csum_flags = 0;
2190
2191 ACQUIRE_GLOBAL_LOCKS();
2192 bridge_enqueue(sc, dst_if, mc, 1);
2193 RELEASE_GLOBAL_LOCKS();
2194 }
2195
2196 if (bmcast) {
2197 mc = m_copypacket(m, M_DONTWAIT);
2198 if (mc == NULL) {
2199 if_statinc(&sc->sc_if, if_oerrors);
2200 goto next;
2201 }
2202 /*
2203 * Before enqueueing this packet to the destination
2204 * interface, clear any in-bound checksum flags to
2205 * prevent them from being misused as out-bound flags.
2206 */
2207 mc->m_pkthdr.csum_flags = 0;
2208
2209 m_set_rcvif(mc, dst_if);
2210 mc->m_flags &= ~M_PROMISC;
2211
2212 ACQUIRE_GLOBAL_LOCKS();
2213 ether_input(dst_if, mc);
2214 RELEASE_GLOBAL_LOCKS();
2215 }
2216 next:
2217 BRIDGE_PSZ_RENTER(s);
2218 bridge_release_member(sc, bif, &psref);
2219 }
2220 BRIDGE_PSZ_REXIT(s);
2221
2222 m_freem(m);
2223 }
2224
2225 static int
2226 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2227 struct bridge_rtnode **brtp)
2228 {
2229 struct bridge_rtnode *brt;
2230 int error;
2231
2232 if (sc->sc_brtcnt >= sc->sc_brtmax)
2233 return ENOSPC;
2234
2235 /*
2236 * Allocate a new bridge forwarding node, and
2237 * initialize the expiration time and Ethernet
2238 * address.
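 *
 * The allocation below uses PR_NOWAIT because this path can be reached
 * from packet processing via bridge_rtupdate(), where sleeping is not
 * an option.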
2239 */
2240 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2241 if (brt == NULL)
2242 return ENOMEM;
2243
2244 memset(brt, 0, sizeof(*brt));
2245 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2246 brt->brt_flags = IFBAF_DYNAMIC;
2247 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2248 PSLIST_ENTRY_INIT(brt, brt_list);
2249 PSLIST_ENTRY_INIT(brt, brt_hash);
2250
2251 BRIDGE_RT_LOCK(sc);
2252 error = bridge_rtnode_insert(sc, brt);
2253 BRIDGE_RT_UNLOCK(sc);
2254
2255 if (error != 0) {
2256 pool_put(&bridge_rtnode_pool, brt);
2257 return error;
2258 }
2259
2260 *brtp = brt;
2261 return 0;
2262 }
2263
2264 /*
2265 * bridge_rtupdate:
2266 *
2267  * Add or update a bridge routing entry.
2268 */
2269 static int
2270 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2271 struct ifnet *dst_if, int setflags, uint8_t flags)
2272 {
2273 struct bridge_rtnode *brt;
2274 int s;
2275
2276 again:
2277 /*
2278 * A route for this destination might already exist. If so,
2279 * update it, otherwise create a new one.
2280 */
2281 BRIDGE_RT_RENTER(s);
2282 brt = bridge_rtnode_lookup(sc, dst);
2283
2284 if (brt != NULL) {
2285 brt->brt_ifp = dst_if;
2286 if (setflags) {
2287 brt->brt_flags = flags;
2288 if (flags & IFBAF_STATIC)
2289 brt->brt_expire = 0;
2290 else
2291 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2292 } else {
2293 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2294 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2295 }
2296 }
2297 BRIDGE_RT_REXIT(s);
2298
2299 if (brt == NULL) {
2300 int r;
2301
2302 r = bridge_rtalloc(sc, dst, &brt);
2303 if (r != 0)
2304 return r;
2305 goto again;
2306 }
2307
2308 return 0;
2309 }
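
/*
 * Illustrative uses of bridge_rtupdate() (a sketch, not additional code):
 *
 *	Dynamic learning from the receive path, as in bridge_input() above:
 *
 *		(void) bridge_rtupdate(sc, eh->ether_shost, ifp, 0,
 *		    IFBAF_DYNAMIC);
 *
 *	Installing a static entry (the ioctl path does something along
 *	these lines); with setflags != 0 the given flags are taken as-is,
 *	and a static entry never expires:
 *
 *		error = bridge_rtupdate(sc, addr, dst_if, 1, IFBAF_STATIC);
 */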
2310
2311 /*
2312 * bridge_rtlookup:
2313 *
2314  * Look up the destination interface for an address.
2315 */
2316 static struct ifnet *
2317 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2318 {
2319 struct bridge_rtnode *brt;
2320 struct ifnet *ifs = NULL;
2321 int s;
2322
2323 BRIDGE_RT_RENTER(s);
2324 brt = bridge_rtnode_lookup(sc, addr);
2325 if (brt != NULL)
2326 ifs = brt->brt_ifp;
2327 BRIDGE_RT_REXIT(s);
2328
2329 return ifs;
2330 }
2331
2332 typedef bool (*bridge_iterate_cb_t)
2333 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2334
2335 /*
2336 * bridge_rtlist_iterate_remove:
2337 *
2338  * Iterate over sc->sc_rtlist and remove the rtnodes that the func
2339  * callback selects for removal.  Removals are done in a pserialize(9)-
2340  * safe manner; to that end, all kmem_* operations are kept outside
2341  * the mutex.
2342 */
2343 static void
2344 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2345 {
2346 struct bridge_rtnode *brt;
2347 struct bridge_rtnode **brt_list;
2348 int i, count;
2349
2350 retry:
2351 count = sc->sc_brtcnt;
2352 if (count == 0)
2353 return;
2354 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2355
2356 BRIDGE_RT_LOCK(sc);
2357 if (__predict_false(sc->sc_brtcnt > count)) {
2358 /* The rtnodes increased, we need more memory */
2359 BRIDGE_RT_UNLOCK(sc);
2360 kmem_free(brt_list, sizeof(*brt_list) * count);
2361 goto retry;
2362 }
2363
2364 i = 0;
2365 /*
2366 * We don't need to use a _SAFE variant here because we know
2367 * that a removed item keeps its next pointer as-is thanks to
2368 * pslist(9) and isn't freed in the loop.
2369 */
2370 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2371 bool need_break = false;
2372 if (func(sc, brt, &need_break, arg)) {
2373 bridge_rtnode_remove(sc, brt);
2374 brt_list[i++] = brt;
2375 }
2376 if (need_break)
2377 break;
2378 }
2379
2380 if (i > 0)
2381 BRIDGE_RT_PSZ_PERFORM(sc);
2382 BRIDGE_RT_UNLOCK(sc);
2383
2384 while (--i >= 0)
2385 bridge_rtnode_destroy(brt_list[i]);
2386
2387 kmem_free(brt_list, sizeof(*brt_list) * count);
2388 }
2389
2390 static bool
2391 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2392 bool *need_break, void *arg)
2393 {
2394 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2395 		/* Take the subsequent removal into account */
2396 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2397 *need_break = true;
2398 return true;
2399 } else
2400 return false;
2401 }
2402
2403 static void
2404 bridge_rttrim0(struct bridge_softc *sc)
2405 {
2406 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2407 }
2408
2409 /*
2410 * bridge_rttrim:
2411 *
2412  * Trim the routing table so that we have a number
2413 * of routing entries less than or equal to the
2414 * maximum number.
2415 */
2416 static void
2417 bridge_rttrim(struct bridge_softc *sc)
2418 {
2419
2420 /* Make sure we actually need to do this. */
2421 if (sc->sc_brtcnt <= sc->sc_brtmax)
2422 return;
2423
2424 /* Force an aging cycle; this might trim enough addresses. */
2425 bridge_rtage(sc);
2426 if (sc->sc_brtcnt <= sc->sc_brtmax)
2427 return;
2428
2429 bridge_rttrim0(sc);
2430
2431 return;
2432 }
2433
2434 /*
2435 * bridge_timer:
2436 *
2437 * Aging timer for the bridge.
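 *
 * The actual aging is deferred to a workqueue (bridge_rtage_work) because
 * the removal path calls pserialize_perform(9), which may block and so
 * cannot run from the callout's softint context.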
2438 */
2439 static void
2440 bridge_timer(void *arg)
2441 {
2442 struct bridge_softc *sc = arg;
2443
2444 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2445 }
2446
2447 static void
2448 bridge_rtage_work(struct work *wk, void *arg)
2449 {
2450 struct bridge_softc *sc = arg;
2451
2452 KASSERT(wk == &sc->sc_rtage_wk);
2453
2454 bridge_rtage(sc);
2455
2456 BRIDGE_LOCK(sc);
2457 if (!sc->sc_stopping) {
2458 callout_reset(&sc->sc_brcallout,
2459 bridge_rtable_prune_period * hz, bridge_timer, sc);
2460 }
2461 BRIDGE_UNLOCK(sc);
2462 }
2463
2464 static bool
2465 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2466 bool *need_break, void *arg)
2467 {
2468 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2469 time_uptime >= brt->brt_expire)
2470 return true;
2471 else
2472 return false;
2473 }
2474
2475 /*
2476 * bridge_rtage:
2477 *
2478 * Perform an aging cycle.
2479 */
2480 static void
2481 bridge_rtage(struct bridge_softc *sc)
2482 {
2483 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2484 }
2485
2486
2487 static bool
2488 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2489 bool *need_break, void *arg)
2490 {
2491 int full = *(int*)arg;
2492
2493 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2494 return true;
2495 else
2496 return false;
2497 }
2498
2499 /*
2500 * bridge_rtflush:
2501 *
2502  * Remove all dynamic addresses, or all addresses if 'full' is set.
2503 */
2504 static void
2505 bridge_rtflush(struct bridge_softc *sc, int full)
2506 {
2507 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2508 }
2509
2510 /*
2511 * bridge_rtdaddr:
2512 *
2513 * Remove an address from the table.
2514 */
2515 static int
2516 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2517 {
2518 struct bridge_rtnode *brt;
2519
2520 BRIDGE_RT_LOCK(sc);
2521 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2522 BRIDGE_RT_UNLOCK(sc);
2523 return ENOENT;
2524 }
2525 bridge_rtnode_remove(sc, brt);
2526 BRIDGE_RT_PSZ_PERFORM(sc);
2527 BRIDGE_RT_UNLOCK(sc);
2528
2529 bridge_rtnode_destroy(brt);
2530
2531 return 0;
2532 }
2533
2534 /*
2535 * bridge_rtdelete:
2536 *
2537  * Delete routes to a specific member interface.
2538 */
2539 static void
2540 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2541 {
2542 struct bridge_rtnode *brt;
2543
2544 /* XXX pserialize_perform for each entry is slow */
2545 again:
2546 BRIDGE_RT_LOCK(sc);
2547 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2548 if (brt->brt_ifp == ifp)
2549 break;
2550 }
2551 if (brt == NULL) {
2552 BRIDGE_RT_UNLOCK(sc);
2553 return;
2554 }
2555 bridge_rtnode_remove(sc, brt);
2556 BRIDGE_RT_PSZ_PERFORM(sc);
2557 BRIDGE_RT_UNLOCK(sc);
2558
2559 bridge_rtnode_destroy(brt);
2560
2561 goto again;
2562 }
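
/*
 * The XXX above could be avoided by expressing the per-interface purge
 * through bridge_rtlist_iterate_remove(), which performs a single
 * pserialize wait for the whole batch.  A hypothetical sketch, not used
 * by the current code:
 *
 *	static bool
 *	bridge_rtdelete_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
 *	    bool *need_break, void *arg)
 *	{
 *		struct ifnet *ifp = arg;
 *
 *		return brt->brt_ifp == ifp;
 *	}
 *
 * after which bridge_rtdelete() would reduce to:
 *
 *	bridge_rtlist_iterate_remove(sc, bridge_rtdelete_cb, ifp);
 */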
2563
2564 /*
2565 * bridge_rtable_init:
2566 *
2567 * Initialize the route table for this bridge.
2568 */
2569 static void
2570 bridge_rtable_init(struct bridge_softc *sc)
2571 {
2572 int i;
2573
2574 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2575 KM_SLEEP);
2576
2577 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2578 PSLIST_INIT(&sc->sc_rthash[i]);
2579
2580 sc->sc_rthash_key = cprng_fast32();
2581
2582 PSLIST_INIT(&sc->sc_rtlist);
2583
2584 sc->sc_rtlist_psz = pserialize_create();
2585 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2586 }
2587
2588 /*
2589 * bridge_rtable_fini:
2590 *
2591 * Deconstruct the route table for this bridge.
2592 */
2593 static void
2594 bridge_rtable_fini(struct bridge_softc *sc)
2595 {
2596
2597 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2598 mutex_obj_free(sc->sc_rtlist_lock);
2599 pserialize_destroy(sc->sc_rtlist_psz);
2600 }
2601
2602 /*
2603 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2604 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2605 */
2606 #define mix(a, b, c) \
2607 do { \
2608 a -= b; a -= c; a ^= (c >> 13); \
2609 b -= c; b -= a; b ^= (a << 8); \
2610 c -= a; c -= b; c ^= (b >> 13); \
2611 a -= b; a -= c; a ^= (c >> 12); \
2612 b -= c; b -= a; b ^= (a << 16); \
2613 c -= a; c -= b; c ^= (b >> 5); \
2614 a -= b; a -= c; a ^= (c >> 3); \
2615 b -= c; b -= a; b ^= (a << 10); \
2616 c -= a; c -= b; c ^= (b >> 15); \
2617 } while (/*CONSTCOND*/0)
2618
2619 static inline uint32_t
2620 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2621 {
2622 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2623
2624 b += addr[5] << 8;
2625 b += addr[4];
2626 a += (uint32_t)addr[3] << 24;
2627 a += addr[2] << 16;
2628 a += addr[1] << 8;
2629 a += addr[0];
2630
2631 mix(a, b, c);
2632
2633 return (c & BRIDGE_RTHASH_MASK);
2634 }
2635
2636 #undef mix
2637
2638 /*
2639 * bridge_rtnode_lookup:
2640 *
2641 * Look up a bridge route node for the specified destination.
2642 */
2643 static struct bridge_rtnode *
2644 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2645 {
2646 struct bridge_rtnode *brt;
2647 uint32_t hash;
2648 int dir;
2649
2650 hash = bridge_rthash(sc, addr);
2651 BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2652 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2653 if (dir == 0)
2654 return brt;
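		/*
		 * The chain is kept sorted in descending memcmp() order by
		 * bridge_rtnode_insert(), so once the current entry compares
		 * less than the address we are looking for, the address
		 * cannot appear later in the chain.
		 */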
2655 if (dir > 0)
2656 return NULL;
2657 }
2658
2659 return NULL;
2660 }
2661
2662 /*
2663 * bridge_rtnode_insert:
2664 *
2665 * Insert the specified bridge node into the route table. We
2666 * assume the entry is not already in the table.
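 *
 * The hash chains are kept sorted in descending memcmp() order of the
 * address, which lets bridge_rtnode_lookup() stop a search early.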
2667 */
2668 static int
2669 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2670 {
2671 struct bridge_rtnode *lbrt, *prev = NULL;
2672 uint32_t hash;
2673
2674 KASSERT(BRIDGE_RT_LOCKED(sc));
2675
2676 hash = bridge_rthash(sc, brt->brt_addr);
2677 BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2678 int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2679 if (dir == 0)
2680 return EEXIST;
2681 if (dir > 0)
2682 break;
2683 prev = lbrt;
2684 }
2685 if (prev == NULL)
2686 BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2687 else
2688 BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2689
2690 BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2691 sc->sc_brtcnt++;
2692
2693 return 0;
2694 }
2695
2696 /*
2697 * bridge_rtnode_remove:
2698 *
2699 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2700 */
2701 static void
2702 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2703 {
2704
2705 KASSERT(BRIDGE_RT_LOCKED(sc));
2706
2707 BRIDGE_RTHASH_WRITER_REMOVE(brt);
2708 BRIDGE_RTLIST_WRITER_REMOVE(brt);
2709 sc->sc_brtcnt--;
2710 }
2711
2712 /*
2713 * bridge_rtnode_destroy:
2714 *
2715 * Destroy a bridge rtnode.
2716 */
2717 static void
2718 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2719 {
2720
2721 PSLIST_ENTRY_DESTROY(brt, brt_list);
2722 PSLIST_ENTRY_DESTROY(brt, brt_hash);
2723 pool_put(&bridge_rtnode_pool, brt);
2724 }
2725
2726 extern pfil_head_t *inet_pfil_hook; /* XXX */
2727 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2728
2729 /*
2730 * Send bridge packets through IPF if they are one of the types IPF can deal
2731 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without
2732 * question.)
2733 */
2734 static int
2735 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2736 {
2737 int snap, error;
2738 struct ether_header *eh1, eh2;
2739 struct llc llc1;
2740 uint16_t ether_type;
2741
2742 snap = 0;
2743 	error = -1;	/* Default to an error unless explicitly cleared below */
2744 eh1 = mtod(*mp, struct ether_header *);
2745 ether_type = ntohs(eh1->ether_type);
2746
2747 /*
2748 * Check for SNAP/LLC.
2749 */
2750 if (ether_type < ETHERMTU) {
2751 struct llc *llc2 = (struct llc *)(eh1 + 1);
2752
2753 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2754 llc2->llc_dsap == LLC_SNAP_LSAP &&
2755 llc2->llc_ssap == LLC_SNAP_LSAP &&
2756 llc2->llc_control == LLC_UI) {
2757 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2758 snap = 1;
2759 }
2760 }
2761
2762 	/* Drop VLAN traffic whose tag was stripped by hardware offloading */
2763 if (vlan_has_tag(*mp))
2764 goto bad;
2765
2766 /*
2767 * If we're trying to filter bridge traffic, don't look at anything
2768 * other than IP and ARP traffic. If the filter doesn't understand
2769 * IPv6, don't allow IPv6 through the bridge either. This is lame
2770 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2771 * but of course we don't have an AppleTalk filter to begin with.
2772 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2773 * ARP traffic.)
2774 */
2775 switch (ether_type) {
2776 case ETHERTYPE_ARP:
2777 case ETHERTYPE_REVARP:
2778 return 0; /* Automatically pass */
2779 case ETHERTYPE_IP:
2780 # ifdef INET6
2781 case ETHERTYPE_IPV6:
2782 # endif /* INET6 */
2783 break;
2784 default:
2785 goto bad;
2786 }
2787
2788 /* Strip off the Ethernet header and keep a copy. */
2789 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2790 m_adj(*mp, ETHER_HDR_LEN);
2791
2792 /* Strip off snap header, if present */
2793 if (snap) {
2794 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2795 m_adj(*mp, sizeof(struct llc));
2796 }
2797
2798 /*
2799 * Check basic packet sanity and run IPF through pfil.
2800 */
2801 KASSERT(!cpu_intr_p());
2802 	switch (ether_type) {
2804 	case ETHERTYPE_IP:
2805 error = bridge_ip_checkbasic(mp);
2806 if (error == 0)
2807 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2808 break;
2809 # ifdef INET6
2810 	case ETHERTYPE_IPV6:
2811 error = bridge_ip6_checkbasic(mp);
2812 if (error == 0)
2813 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2814 break;
2815 # endif
2816 	default:
2817 error = 0;
2818 break;
2819 }
2820
2821 if (*mp == NULL)
2822 return error;
2823 if (error != 0)
2824 goto bad;
2825
2826 error = -1;
2827
2828 /*
2829 * Finally, put everything back the way it was and return
2830 */
2831 if (snap) {
2832 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2833 if (*mp == NULL)
2834 return error;
2835 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2836 }
2837
2838 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2839 if (*mp == NULL)
2840 return error;
2841 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2842
2843 return 0;
2844
2845 bad:
2846 m_freem(*mp);
2847 *mp = NULL;
2848 return error;
2849 }
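
/*
 * bridge_ipf() has the standard pfil(9) hook signature
 * (func(arg, mp, ifp, dir)), so a filter of the same shape can be
 * attached to the bridge's own pfil head.  A hedged sketch, assuming the
 * pfil_add_hook(func, arg, flags, head) form of pfil(9); the hook name
 * and its drop-all-outbound policy are purely illustrative:
 *
 *	static int
 *	example_bridge_hook(void *arg, struct mbuf **mp, struct ifnet *ifp,
 *	    int dir)
 *	{
 *
 *		if (dir == PFIL_OUT) {
 *			m_freem(*mp);
 *			*mp = NULL;
 *			return EPERM;	(non-zero: the caller drops the packet)
 *		}
 *		return 0;		(zero: the packet continues)
 *	}
 *
 *	pfil_add_hook(example_bridge_hook, NULL, PFIL_IN | PFIL_OUT,
 *	    sc->sc_if.if_pfil);
 *
 * Compare the pfil_run_hooks(sc->sc_if.if_pfil, ...) call in
 * bridge_forward(), which is where such hooks run for bridged traffic.
 */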
2850
2851 /*
2852  * Perform basic checks on the header size, since
2853  * IPF assumes ip_input has already done that
2854  * work for it.  Cut-and-pasted from ip_input.c.
2855 * Given how simple the IPv6 version is,
2856 * does the IPv4 version really need to be
2857 * this complicated?
2858 *
2859 * XXX Should we update ipstat here, or not?
2860 * XXX Right now we update ipstat but not
2861 * XXX csum_counter.
2862 */
2863 static int
2864 bridge_ip_checkbasic(struct mbuf **mp)
2865 {
2866 struct mbuf *m = *mp;
2867 struct ip *ip;
2868 int len, hlen;
2869
2870 if (*mp == NULL)
2871 return -1;
2872
2873 if (M_GET_ALIGNED_HDR(&m, struct ip, true) != 0) {
2874 /* XXXJRT new stat, please */
2875 ip_statinc(IP_STAT_TOOSMALL);
2876 goto bad;
2877 }
2878 ip = mtod(m, struct ip *);
2879 if (ip == NULL) goto bad;
2880
2881 if (ip->ip_v != IPVERSION) {
2882 ip_statinc(IP_STAT_BADVERS);
2883 goto bad;
2884 }
2885 hlen = ip->ip_hl << 2;
2886 if (hlen < sizeof(struct ip)) { /* minimum header length */
2887 ip_statinc(IP_STAT_BADHLEN);
2888 goto bad;
2889 }
2890 if (hlen > m->m_len) {
2891 		if ((m = m_pullup(m, hlen)) == NULL) {
2892 ip_statinc(IP_STAT_BADHLEN);
2893 goto bad;
2894 }
2895 ip = mtod(m, struct ip *);
2896 if (ip == NULL) goto bad;
2897 }
2898
2899 switch (m->m_pkthdr.csum_flags &
2900 ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2901 M_CSUM_IPv4_BAD)) {
2902 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2903 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2904 goto bad;
2905
2906 case M_CSUM_IPv4:
2907 /* Checksum was okay. */
2908 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2909 break;
2910
2911 default:
2912 /* Must compute it ourselves. */
2913 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2914 if (in_cksum(m, hlen) != 0)
2915 goto bad;
2916 break;
2917 }
2918
2919 /* Retrieve the packet length. */
2920 len = ntohs(ip->ip_len);
2921
2922 /*
2923 * Check for additional length bogosity
2924 */
2925 if (len < hlen) {
2926 ip_statinc(IP_STAT_BADLEN);
2927 goto bad;
2928 }
2929
2930 /*
2931 * Check that the amount of data in the buffers
2932  * is at least as much as the IP header would have us expect.
2933 * Drop packet if shorter than we expect.
2934 */
2935 if (m->m_pkthdr.len < len) {
2936 ip_statinc(IP_STAT_TOOSHORT);
2937 goto bad;
2938 }
2939
2940 /* Checks out, proceed */
2941 *mp = m;
2942 return 0;
2943
2944 bad:
2945 *mp = m;
2946 return -1;
2947 }
2948
2949 # ifdef INET6
2950 /*
2951 * Same as above, but for IPv6.
2952 * Cut-and-pasted from ip6_input.c.
2953 * XXX Should we update ip6stat, or not?
2954 */
2955 static int
2956 bridge_ip6_checkbasic(struct mbuf **mp)
2957 {
2958 struct mbuf *m = *mp;
2959 struct ip6_hdr *ip6;
2960
2961 /*
2962 * If the IPv6 header is not aligned, slurp it up into a new
2963 * mbuf with space for link headers, in the event we forward
2964 * it. Otherwise, if it is aligned, make sure the entire base
2965 * IPv6 header is in the first mbuf of the chain.
2966 */
2967 if (M_GET_ALIGNED_HDR(&m, struct ip6_hdr, true) != 0) {
2968 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2969 /* XXXJRT new stat, please */
2970 ip6_statinc(IP6_STAT_TOOSMALL);
2971 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2972 goto bad;
2973 }
2974
2975 ip6 = mtod(m, struct ip6_hdr *);
2976
2977 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2978 ip6_statinc(IP6_STAT_BADVERS);
2979 in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2980 goto bad;
2981 }
2982
2983 /* Checks out, proceed */
2984 *mp = m;
2985 return 0;
2986
2987 bad:
2988 *mp = m;
2989 return -1;
2990 }
2991 # endif /* INET6 */
2992