1 /*	$NetBSD: if_bridge.c,v 1.189.4.2 2024/09/05 09:27:12 martin Exp $	*/
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.189.4.2 2024/09/05 09:27:12 martin Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_inet.h"
87 #include "opt_net_mpsafe.h"
88 #endif /* _KERNEL_OPT */
89
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105 #include <sys/syslog.h>
106
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115 #include <net/ether_sw_offload.h>
116
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123 #include <netinet/ip6.h>
124 #include <netinet6/in6_var.h>
125 #include <netinet6/ip6_var.h>
126 #include <netinet6/ip6_private.h> /* XXX */
127
128 /*
129 * Size of the route hash table. Must be a power of two.
130 */
131 #ifndef BRIDGE_RTHASH_SIZE
132 #define BRIDGE_RTHASH_SIZE 1024
133 #endif
134
135 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
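/*
 * Editor's note -- illustrative sketch only, not part of the original
 * source: because BRIDGE_RTHASH_SIZE is a power of two, a bucket index
 * can be derived from a hash value with a simple mask instead of a
 * modulo.  The bridge's real hash function is defined elsewhere in this
 * file; "example_rthash_bucket" below is a hypothetical helper that only
 * demonstrates the size/mask relationship.
 */
#if 0	/* example only, not compiled */
static inline unsigned int
example_rthash_bucket(uint32_t hash)
{

	/* Equivalent to "hash % BRIDGE_RTHASH_SIZE" for a power-of-two size. */
	return hash & BRIDGE_RTHASH_MASK;
}
#endif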
136
137 #include "carp.h"
138 #if NCARP > 0
139 #include <netinet/in.h>
140 #include <netinet/in_var.h>
141 #include <netinet/ip_carp.h>
142 #endif
143
144 #include "ioconf.h"
145
146 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
148 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
149
150 /*
151 * Maximum number of addresses to cache.
152 */
153 #ifndef BRIDGE_RTABLE_MAX
154 #define BRIDGE_RTABLE_MAX 100
155 #endif
156
157 /*
158 * Spanning tree defaults.
159 */
160 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
161 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
162 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
163 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
164 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
165 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
166 #define BSTP_DEFAULT_PATH_COST 55
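/*
 * Editor's note -- assumption based on the BRDGGHT/BRDGSHT-style handlers
 * later in this file, which convert with ">> 8" and "<< 8": the spanning
 * tree times above appear to be kept in units of 1/256 of a second, hence
 * the "seconds * 256" defaults.  A hypothetical conversion helper:
 */
#if 0	/* example only, not compiled */
static inline int
example_bstp_to_seconds(int t)
{

	/* e.g. BSTP_DEFAULT_HELLO_TIME == 2 * 256 == 512; 512 >> 8 == 2 s */
	return t >> 8;
}
#endif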
167
168 /*
169 * Timeout (in seconds) for entries learned dynamically.
170 */
171 #ifndef BRIDGE_RTABLE_TIMEOUT
172 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
173 #endif
174
175 /*
176 * Number of seconds between walks of the route list.
177 */
178 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
179 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
180 #endif
181
182 #define BRIDGE_RT_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_lock)
183 #define BRIDGE_RT_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_lock)
184 #define BRIDGE_RT_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_lock)
185
186 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
187 pserialize_perform((_sc)->sc_rtlist_psz)
188
189 #define BRIDGE_RT_RENTER(__s) do { __s = pserialize_read_enter(); } while (0)
190 #define BRIDGE_RT_REXIT(__s) do { pserialize_read_exit(__s); } while (0)
191
192 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc) \
193 PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist), \
194 struct bridge_rtnode, brt_list)
195 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc) \
196 PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist), \
197 struct bridge_rtnode, brt_list)
198 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt) \
199 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
200 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt) \
201 PSLIST_WRITER_REMOVE((_brt), brt_list)
202
203 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash) \
204 PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
205 struct bridge_rtnode, brt_hash)
206 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash) \
207 PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
208 struct bridge_rtnode, brt_hash)
209 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt) \
210 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
211 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new) \
212 PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
213 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt) \
214 PSLIST_WRITER_REMOVE((_brt), brt_hash)
215
216 #ifdef NET_MPSAFE
217 #define DECLARE_LOCK_VARIABLE
218 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
219 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
220 #else
221 #define DECLARE_LOCK_VARIABLE int __s
222 #define ACQUIRE_GLOBAL_LOCKS() do { \
223 KERNEL_LOCK(1, NULL); \
224 mutex_enter(softnet_lock); \
225 __s = splsoftnet(); \
226 } while (0)
227 #define RELEASE_GLOBAL_LOCKS() do { \
228 splx(__s); \
229 mutex_exit(softnet_lock); \
230 KERNEL_UNLOCK_ONE(NULL); \
231 } while (0)
232 #endif
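/*
 * Editor's note -- usage sketch with a hypothetical function name: the
 * three macros above are meant to be used together.  DECLARE_LOCK_VARIABLE
 * provides the "__s" consumed by ACQUIRE/RELEASE_GLOBAL_LOCKS() on
 * non-NET_MPSAFE kernels; on NET_MPSAFE kernels all three expand to
 * (almost) nothing.  The same pattern appears in bridge_forward() and
 * bridge_input() below.
 */
#if 0	/* example only, not compiled */
static void
example_globally_locked_input(struct ifnet *ifp, struct mbuf *m)
{
	DECLARE_LOCK_VARIABLE;

	ACQUIRE_GLOBAL_LOCKS();
	ether_input(ifp, m);
	RELEASE_GLOBAL_LOCKS();
}
#endif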
233
234 struct psref_class *bridge_psref_class __read_mostly;
235
236 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
237
238 static struct pool bridge_rtnode_pool;
239
240 static int bridge_clone_create(struct if_clone *, int);
241 static int bridge_clone_destroy(struct ifnet *);
242
243 static int bridge_ioctl(struct ifnet *, u_long, void *);
244 static int bridge_init(struct ifnet *);
245 static void bridge_stop(struct ifnet *, int);
246 static void bridge_start(struct ifnet *);
247 static void bridge_ifdetach(void *);
248
249 static void bridge_input(struct ifnet *, struct mbuf *);
250 static void bridge_forward(struct bridge_softc *, struct mbuf *);
251
252 static void bridge_timer(void *);
253
254 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, bool,
255 struct mbuf *);
256
257 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
258 struct ifnet *, int, uint8_t);
259 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
260 static void bridge_rttrim(struct bridge_softc *);
261 static void bridge_rtage(struct bridge_softc *);
262 static void bridge_rtage_work(struct work *, void *);
263 static void bridge_rtflush(struct bridge_softc *, int);
264 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
265 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
266
267 static void bridge_rtable_init(struct bridge_softc *);
268 static void bridge_rtable_fini(struct bridge_softc *);
269
270 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
271 const uint8_t *);
272 static int bridge_rtnode_insert(struct bridge_softc *,
273 struct bridge_rtnode *);
274 static void bridge_rtnode_remove(struct bridge_softc *,
275 struct bridge_rtnode *);
276 static void bridge_rtnode_destroy(struct bridge_rtnode *);
277
278 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
279 const char *name,
280 struct psref *);
281 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
282 struct ifnet *ifp,
283 struct psref *);
284 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
285 struct psref *);
286 static void bridge_delete_member(struct bridge_softc *,
287 struct bridge_iflist *);
288 static void bridge_acquire_member(struct bridge_softc *sc,
289 struct bridge_iflist *,
290 struct psref *);
291
292 static int bridge_ioctl_add(struct bridge_softc *, void *);
293 static int bridge_ioctl_del(struct bridge_softc *, void *);
294 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
295 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
296 static int bridge_ioctl_scache(struct bridge_softc *, void *);
297 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
298 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
299 static int bridge_ioctl_rts(struct bridge_softc *, void *);
300 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
301 static int bridge_ioctl_sto(struct bridge_softc *, void *);
302 static int bridge_ioctl_gto(struct bridge_softc *, void *);
303 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
304 static int bridge_ioctl_flush(struct bridge_softc *, void *);
305 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
306 static int bridge_ioctl_spri(struct bridge_softc *, void *);
307 static int bridge_ioctl_ght(struct bridge_softc *, void *);
308 static int bridge_ioctl_sht(struct bridge_softc *, void *);
309 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
310 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
311 static int bridge_ioctl_gma(struct bridge_softc *, void *);
312 static int bridge_ioctl_sma(struct bridge_softc *, void *);
313 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
314 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
315 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
316 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
317 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
318 static int bridge_ip_checkbasic(struct mbuf **mp);
319 # ifdef INET6
320 static int bridge_ip6_checkbasic(struct mbuf **mp);
321 # endif /* INET6 */
322
323 struct bridge_control {
324 int (*bc_func)(struct bridge_softc *, void *);
325 int bc_argsize;
326 int bc_flags;
327 };
328
329 #define BC_F_COPYIN 0x01 /* copy arguments in */
330 #define BC_F_COPYOUT 0x02 /* copy arguments out */
331 #define BC_F_SUSER 0x04 /* do super-user check */
332 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
333 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
334
335 static const struct bridge_control bridge_control_table[] = {
336 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
337 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
338
339 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
340 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
341
342 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
344
345 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
346 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
347
348 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
349
350 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
351 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
352
353 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
354
355 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
356
357 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
358 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
359
360 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
361 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
362
363 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
364 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
365
366 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
367 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
368
369 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
370
371 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
372
373 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
374 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
375
376 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
377 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
378 };
379
380 static const int bridge_control_table_size = __arraycount(bridge_control_table);
381
382 static struct if_clone bridge_cloner =
383 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
384
385 /*
386 * bridgeattach:
387 *
388 * Pseudo-device attach routine.
389 */
390 void
391 bridgeattach(int n)
392 {
393
394 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
395 0, 0, 0, "brtpl", NULL, IPL_NET);
396
397 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
398
399 if_clone_attach(&bridge_cloner);
400 }
401
402 /*
403 * bridge_clone_create:
404 *
405 * Create a new bridge instance.
406 */
407 static int
408 bridge_clone_create(struct if_clone *ifc, int unit)
409 {
410 struct bridge_softc *sc;
411 struct ifnet *ifp;
412 int error;
413
414 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
415 ifp = &sc->sc_if;
416
417 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
418 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
419 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
420 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
421 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
422 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
423 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
424 sc->sc_filter_flags = 0;
425
426 /* Initialize our routing table. */
427 bridge_rtable_init(sc);
428
429 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
430 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
431 if (error)
432 panic("%s: workqueue_create %d\n", __func__, error);
433
434 callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
435 callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
436
437 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
438 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
439 sc->sc_iflist_psref.bip_psz = pserialize_create();
440
441 if_initname(ifp, ifc->ifc_name, unit);
442 ifp->if_softc = sc;
443 #ifdef NET_MPSAFE
444 ifp->if_extflags = IFEF_MPSAFE;
445 #endif
446 ifp->if_mtu = ETHERMTU;
447 ifp->if_ioctl = bridge_ioctl;
448 ifp->if_output = bridge_output;
449 ifp->if_start = bridge_start;
450 ifp->if_stop = bridge_stop;
451 ifp->if_init = bridge_init;
452 ifp->if_type = IFT_BRIDGE;
453 ifp->if_addrlen = 0;
454 ifp->if_dlt = DLT_EN10MB;
455 ifp->if_hdrlen = ETHER_HDR_LEN;
456 if_initialize(ifp);
457
458 /*
459 * Set the link state to down.
460 * When interfaces are added the link state will reflect
461 * the best link state of the combined interfaces.
462 */
463 ifp->if_link_state = LINK_STATE_DOWN;
464
465 if_alloc_sadl(ifp);
466 if_register(ifp);
467
468 return 0;
469 }
470
471 /*
472 * bridge_clone_destroy:
473 *
474 * Destroy a bridge instance.
475 */
476 static int
477 bridge_clone_destroy(struct ifnet *ifp)
478 {
479 struct bridge_softc *sc = ifp->if_softc;
480 struct bridge_iflist *bif;
481
482 if ((ifp->if_flags & IFF_RUNNING) != 0)
483 bridge_stop(ifp, 1);
484
485 BRIDGE_LOCK(sc);
486 for (;;) {
487 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
488 bif_next);
489 if (bif == NULL)
490 break;
491 bridge_delete_member(sc, bif);
492 }
493 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
494 BRIDGE_UNLOCK(sc);
495
496 if_detach(ifp);
497
498 /* Tear down the routing table. */
499 bridge_rtable_fini(sc);
500
501 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
502 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
503 callout_destroy(&sc->sc_brcallout);
504 callout_destroy(&sc->sc_bstpcallout);
505 workqueue_destroy(sc->sc_rtage_wq);
506 kmem_free(sc, sizeof(*sc));
507
508 return 0;
509 }
510
511 /*
512 * bridge_ioctl:
513 *
514 * Handle a control request from the operator.
515 */
516 static int
517 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
518 {
519 struct bridge_softc *sc = ifp->if_softc;
520 struct lwp *l = curlwp; /* XXX */
521 union {
522 struct ifbreq ifbreq;
523 struct ifbifconf ifbifconf;
524 struct ifbareq ifbareq;
525 struct ifbaconf ifbaconf;
526 struct ifbrparam ifbrparam;
527 } args;
528 struct ifdrv *ifd = (struct ifdrv *) data;
529 const struct bridge_control *bc = NULL; /* XXXGCC */
530 int error = 0;
531
532 /* Authorize command before calling splsoftnet(). */
533 switch (cmd) {
534 case SIOCGDRVSPEC:
535 case SIOCSDRVSPEC:
536 if (ifd->ifd_cmd >= bridge_control_table_size
537 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
538 error = EINVAL;
539 return error;
540 }
541
542 /* We only care about BC_F_SUSER at this point. */
543 if ((bc->bc_flags & BC_F_SUSER) == 0)
544 break;
545
546 error = kauth_authorize_network(l->l_cred,
547 KAUTH_NETWORK_INTERFACE_BRIDGE,
548 cmd == SIOCGDRVSPEC ?
549 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
550 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
551 ifd, NULL, NULL);
552 if (error)
553 return error;
554
555 break;
556 }
557
558 const int s = splsoftnet();
559
560 switch (cmd) {
561 case SIOCGDRVSPEC:
562 case SIOCSDRVSPEC:
563 KASSERT(bc != NULL);
564 if (cmd == SIOCGDRVSPEC &&
565 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
566 error = EINVAL;
567 break;
568 }
569 else if (cmd == SIOCSDRVSPEC &&
570 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
571 error = EINVAL;
572 break;
573 }
574
575 /* BC_F_SUSER is checked above, before splsoftnet(). */
576
577 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
578 && (ifd->ifd_len != bc->bc_argsize
579 || ifd->ifd_len > sizeof(args))) {
580 error = EINVAL;
581 break;
582 }
583
584 memset(&args, 0, sizeof(args));
585 if (bc->bc_flags & BC_F_COPYIN) {
586 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
587 if (error)
588 break;
589 } else if (bc->bc_flags & BC_F_XLATEIN) {
590 args.ifbifconf.ifbic_len = ifd->ifd_len;
591 args.ifbifconf.ifbic_buf = ifd->ifd_data;
592 }
593
594 error = (*bc->bc_func)(sc, &args);
595 if (error)
596 break;
597
598 if (bc->bc_flags & BC_F_COPYOUT) {
599 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
600 } else if (bc->bc_flags & BC_F_XLATEOUT) {
601 ifd->ifd_len = args.ifbifconf.ifbic_len;
602 ifd->ifd_data = args.ifbifconf.ifbic_buf;
603 }
604 break;
605
606 case SIOCSIFFLAGS:
607 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
608 break;
609 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
610 case IFF_RUNNING:
611 /*
612 * If interface is marked down and it is running,
613 * then stop and disable it.
614 */
615 if_stop(ifp, 1);
616 break;
617 case IFF_UP:
618 /*
619 * If interface is marked up and it is stopped, then
620 * start it.
621 */
622 error = if_init(ifp);
623 break;
624 default:
625 break;
626 }
627 break;
628
629 case SIOCSIFMTU:
630 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
631 error = 0;
632 break;
633
634 case SIOCGIFCAP:
635 {
636 struct ifcapreq *ifcr = (struct ifcapreq *)data;
637 ifcr->ifcr_capabilities = sc->sc_capenable;
638 ifcr->ifcr_capenable = sc->sc_capenable;
639 break;
640 }
641
642 default:
643 error = ifioctl_common(ifp, cmd, data);
644 break;
645 }
646
647 splx(s);
648
649 return error;
650 }
651
652 /*
653 * bridge_lookup_member:
654 *
655 * Lookup a bridge member interface.
656 */
657 static struct bridge_iflist *
658 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
659 {
660 struct bridge_iflist *bif;
661 struct ifnet *ifp;
662 int s;
663
664 BRIDGE_PSZ_RENTER(s);
665
666 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
667 ifp = bif->bif_ifp;
668 if (strcmp(ifp->if_xname, name) == 0)
669 break;
670 }
671 if (bif != NULL)
672 bridge_acquire_member(sc, bif, psref);
673
674 BRIDGE_PSZ_REXIT(s);
675
676 return bif;
677 }
678
679 /*
680 * bridge_lookup_member_if:
681 *
682 * Lookup a bridge member interface by ifnet*.
683 */
684 static struct bridge_iflist *
685 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
686 struct psref *psref)
687 {
688 struct bridge_iflist *bif;
689 int s;
690
691 BRIDGE_PSZ_RENTER(s);
692
693 bif = member_ifp->if_bridgeif;
694 if (bif != NULL) {
695 psref_acquire(psref, &bif->bif_psref,
696 bridge_psref_class);
697 }
698
699 BRIDGE_PSZ_REXIT(s);
700
701 return bif;
702 }
703
704 static void
705 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
706 struct psref *psref)
707 {
708
709 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
710 }
711
712 /*
713 * bridge_release_member:
714 *
715 * Release the specified member interface.
716 */
717 static void
718 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
719 struct psref *psref)
720 {
721
722 psref_release(psref, &bif->bif_psref, bridge_psref_class);
723 }
724
725 /*
726 * bridge_delete_member:
727 *
728 * Delete the specified member interface.
729 */
730 static void
731 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
732 {
733 struct ifnet *ifs = bif->bif_ifp;
734
735 KASSERT(BRIDGE_LOCKED(sc));
736
737 ifs->_if_input = ether_input;
738 ifs->if_bridge = NULL;
739 ifs->if_bridgeif = NULL;
740
741 PSLIST_WRITER_REMOVE(bif, bif_next);
742 BRIDGE_PSZ_PERFORM(sc);
743
744 if_linkstate_change_disestablish(ifs,
745 bif->bif_linkstate_hook, BRIDGE_LOCK_OBJ(sc));
746 ether_ifdetachhook_disestablish(ifs,
747 bif->bif_ifdetach_hook, BRIDGE_LOCK_OBJ(sc));
748
749 BRIDGE_UNLOCK(sc);
750
751 switch (ifs->if_type) {
752 case IFT_ETHER:
753 case IFT_L2TP:
754 /*
755 * Take the interface out of promiscuous mode.
756 		 * Don't call it while holding a spin lock.
757 */
758 (void) ifpromisc(ifs, 0);
759 IFNET_LOCK(ifs);
760 (void) ether_disable_vlan_mtu(ifs);
761 IFNET_UNLOCK(ifs);
762 break;
763 default:
764 #ifdef DIAGNOSTIC
765 panic("%s: impossible", __func__);
766 #endif
767 break;
768 }
769
770 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
771
772 PSLIST_ENTRY_DESTROY(bif, bif_next);
773 kmem_free(bif, sizeof(*bif));
774
775 BRIDGE_LOCK(sc);
776 }
777
778 /*
779 * bridge_calc_csum_flags:
780 *
781  *	Calculate the logical AND of the csum flags supported by each
 *	member interface.
782 */
783 void
784 bridge_calc_csum_flags(struct bridge_softc *sc)
785 {
786 struct bridge_iflist *bif;
787 struct ifnet *ifs = NULL;
788 int flags = ~0;
789 int capenable = ~0;
790
791 BRIDGE_LOCK(sc);
792 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
793 ifs = bif->bif_ifp;
794 flags &= ifs->if_csum_flags_tx;
795 capenable &= ifs->if_capenable;
796 }
797 sc->sc_csum_flags_tx = flags;
798 sc->sc_capenable = (ifs != NULL) ? capenable : 0;
799 BRIDGE_UNLOCK(sc);
800 }
801
802 /*
803 * bridge_calc_link_state:
804 *
805 * Calculate the link state based on each member interface.
806 */
807 static void
808 bridge_calc_link_state(void *xsc)
809 {
810 struct bridge_softc *sc = xsc;
811 struct bridge_iflist *bif;
812 struct ifnet *ifs;
813 int link_state = LINK_STATE_DOWN;
814
815 BRIDGE_LOCK(sc);
816 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
817 ifs = bif->bif_ifp;
818 if (ifs->if_link_state == LINK_STATE_UP) {
819 link_state = LINK_STATE_UP;
820 break;
821 }
822 if (ifs->if_link_state == LINK_STATE_UNKNOWN)
823 link_state = LINK_STATE_UNKNOWN;
824 }
825 if_link_state_change(&sc->sc_if, link_state);
826 BRIDGE_UNLOCK(sc);
827 }
828
829 static int
830 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
831 {
832 struct ifbreq *req = arg;
833 struct bridge_iflist *bif = NULL;
834 struct ifnet *ifs;
835 int error = 0;
836 struct psref psref;
837
838 ifs = if_get(req->ifbr_ifsname, &psref);
839 if (ifs == NULL)
840 return ENOENT;
841
842 if (ifs->if_bridge == sc) {
843 error = EEXIST;
844 goto out;
845 }
846
847 if (ifs->if_bridge != NULL) {
848 error = EBUSY;
849 goto out;
850 }
851
852 if (ifs->_if_input != ether_input) {
853 error = EINVAL;
854 goto out;
855 }
856
857 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
858 if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
859 error = EINVAL;
860 goto out;
861 }
862
863 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
864
865 switch (ifs->if_type) {
866 case IFT_ETHER:
867 if (sc->sc_if.if_mtu != ifs->if_mtu) {
868 /* Change MTU of added interface to bridge MTU */
869 struct ifreq ifr;
870 memset(&ifr, 0, sizeof(ifr));
871 ifr.ifr_mtu = sc->sc_if.if_mtu;
872 IFNET_LOCK(ifs);
873 error = if_ioctl(ifs, SIOCSIFMTU, &ifr);
874 IFNET_UNLOCK(ifs);
875 if (error != 0)
876 goto out;
877 }
878 /* FALLTHROUGH */
879 case IFT_L2TP:
880 IFNET_LOCK(ifs);
881 error = ether_enable_vlan_mtu(ifs);
882 IFNET_UNLOCK(ifs);
883 if (error > 0)
884 goto out;
885 /*
886 * Place the interface into promiscuous mode.
887 */
888 error = ifpromisc(ifs, 1);
889 if (error)
890 goto out;
891 break;
892 default:
893 error = EINVAL;
894 goto out;
895 }
896
897 bif->bif_ifp = ifs;
898 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
899 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
900 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
901 bif->bif_linkstate_hook = if_linkstate_change_establish(ifs,
902 bridge_calc_link_state, sc);
903 PSLIST_ENTRY_INIT(bif, bif_next);
904 psref_target_init(&bif->bif_psref, bridge_psref_class);
905
906 BRIDGE_LOCK(sc);
907
908 ifs->if_bridge = sc;
909 ifs->if_bridgeif = bif;
910 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
911 ifs->_if_input = bridge_input;
912
913 BRIDGE_UNLOCK(sc);
914
915 bif->bif_ifdetach_hook = ether_ifdetachhook_establish(ifs,
916 bridge_ifdetach, (void *)ifs);
917
918 bridge_calc_csum_flags(sc);
919 bridge_calc_link_state(sc);
920
921 if (sc->sc_if.if_flags & IFF_RUNNING)
922 bstp_initialization(sc);
923 else
924 bstp_stop(sc);
925
926 out:
927 if_put(ifs, &psref);
928 if (error) {
929 if (bif != NULL)
930 kmem_free(bif, sizeof(*bif));
931 }
932 return error;
933 }
934
935 static int
936 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
937 {
938 struct ifbreq *req = arg;
939 const char *name = req->ifbr_ifsname;
940 struct bridge_iflist *bif;
941 struct ifnet *ifs;
942
943 BRIDGE_LOCK(sc);
944
945 /*
946 * Don't use bridge_lookup_member. We want to get a member
947 * with bif_refs == 0.
948 */
949 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
950 ifs = bif->bif_ifp;
951 if (strcmp(ifs->if_xname, name) == 0)
952 break;
953 }
954
955 if (bif == NULL) {
956 BRIDGE_UNLOCK(sc);
957 return ENOENT;
958 }
959
960 bridge_delete_member(sc, bif);
961
962 BRIDGE_UNLOCK(sc);
963
964 bridge_rtdelete(sc, ifs);
965 bridge_calc_csum_flags(sc);
966 bridge_calc_link_state(sc);
967
968 if (sc->sc_if.if_flags & IFF_RUNNING)
969 bstp_initialization(sc);
970
971 return 0;
972 }
973
974 static int
975 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
976 {
977 struct ifbreq *req = arg;
978 struct bridge_iflist *bif;
979 struct psref psref;
980
981 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
982 if (bif == NULL)
983 return ENOENT;
984
985 req->ifbr_ifsflags = bif->bif_flags;
986 req->ifbr_state = bif->bif_state;
987 req->ifbr_priority = bif->bif_priority;
988 req->ifbr_path_cost = bif->bif_path_cost;
989 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
990
991 bridge_release_member(sc, bif, &psref);
992
993 return 0;
994 }
995
996 static int
997 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
998 {
999 struct ifbreq *req = arg;
1000 struct bridge_iflist *bif;
1001 struct psref psref;
1002
1003 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1004 if (bif == NULL)
1005 return ENOENT;
1006
1007 if (req->ifbr_ifsflags & IFBIF_STP) {
1008 switch (bif->bif_ifp->if_type) {
1009 case IFT_ETHER:
1010 case IFT_L2TP:
1011 /* These can do spanning tree. */
1012 break;
1013
1014 default:
1015 /* Nothing else can. */
1016 bridge_release_member(sc, bif, &psref);
1017 return EINVAL;
1018 }
1019 }
1020
1021 if (bif->bif_flags & IFBIF_PROTECTED) {
1022 if ((req->ifbr_ifsflags & IFBIF_PROTECTED) == 0) {
1023 log(LOG_INFO, "%s: disabling protection on %s\n",
1024 sc->sc_if.if_xname, bif->bif_ifp->if_xname);
1025 }
1026 } else {
1027 if (req->ifbr_ifsflags & IFBIF_PROTECTED) {
1028 log(LOG_INFO, "%s: enabling protection on %s\n",
1029 sc->sc_if.if_xname, bif->bif_ifp->if_xname);
1030 }
1031 }
1032
1033 bif->bif_flags = req->ifbr_ifsflags;
1034
1035 bridge_release_member(sc, bif, &psref);
1036
1037 if (sc->sc_if.if_flags & IFF_RUNNING)
1038 bstp_initialization(sc);
1039
1040 return 0;
1041 }
1042
1043 static int
1044 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1045 {
1046 struct ifbrparam *param = arg;
1047
1048 sc->sc_brtmax = param->ifbrp_csize;
1049 bridge_rttrim(sc);
1050
1051 return 0;
1052 }
1053
1054 static int
1055 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1056 {
1057 struct ifbrparam *param = arg;
1058
1059 param->ifbrp_csize = sc->sc_brtmax;
1060
1061 return 0;
1062 }
1063
1064 static int
1065 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1066 {
1067 struct ifbifconf *bifc = arg;
1068 struct bridge_iflist *bif;
1069 struct ifbreq *breqs;
1070 int i, count, error = 0;
1071
1072 retry:
1073 BRIDGE_LOCK(sc);
1074 count = 0;
1075 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1076 count++;
1077 BRIDGE_UNLOCK(sc);
1078
1079 if (count == 0) {
1080 bifc->ifbic_len = 0;
1081 return 0;
1082 }
1083
1084 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1085 		/* Tell the caller that a larger buffer is needed */
1086 bifc->ifbic_len = sizeof(*breqs) * count;
1087 return 0;
1088 }
1089
1090 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1091
1092 BRIDGE_LOCK(sc);
1093
1094 i = 0;
1095 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1096 i++;
1097 if (i > count) {
1098 /*
1099 * The number of members has been increased.
1100 * We need more memory!
1101 */
1102 BRIDGE_UNLOCK(sc);
1103 kmem_free(breqs, sizeof(*breqs) * count);
1104 goto retry;
1105 }
1106
1107 i = 0;
1108 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1109 struct ifbreq *breq = &breqs[i++];
1110 memset(breq, 0, sizeof(*breq));
1111
1112 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1113 sizeof(breq->ifbr_ifsname));
1114 breq->ifbr_ifsflags = bif->bif_flags;
1115 breq->ifbr_state = bif->bif_state;
1116 breq->ifbr_priority = bif->bif_priority;
1117 breq->ifbr_path_cost = bif->bif_path_cost;
1118 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1119 }
1120
1121 	/* Don't call copyout while holding the mutex */
1122 BRIDGE_UNLOCK(sc);
1123
1124 for (i = 0; i < count; i++) {
1125 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1126 if (error)
1127 break;
1128 }
1129 bifc->ifbic_len = sizeof(*breqs) * i;
1130
1131 kmem_free(breqs, sizeof(*breqs) * count);
1132
1133 return error;
1134 }
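/*
 * Editor's note -- hedged userland sketch, not part of this file: the
 * BRDGGIFS case above is reached through SIOCGDRVSPEC, with ifd_len and
 * ifd_data translated into ifbic_len/ifbic_buf by the BC_F_XLATEIN and
 * BC_F_XLATEOUT handling in bridge_ioctl().  A caller typically probes
 * with a zero-length buffer to learn the required size, then retries.
 * The function name "example_list_members" and its error handling are
 * hypothetical.
 */
#if 0	/* userland example, illustrative only */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_bridgevar.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
example_list_members(int s, const char *bridge)
{
	struct ifdrv ifd;
	struct ifbreq *breqs;
	size_t i, n;

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, bridge, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGGIFS;
	ifd.ifd_len = 0;		/* probe: kernel reports needed size */
	ifd.ifd_data = NULL;
	if (ioctl(s, SIOCGDRVSPEC, &ifd) == -1)
		return -1;
	if (ifd.ifd_len == 0)
		return 0;		/* bridge has no members */

	if ((breqs = malloc(ifd.ifd_len)) == NULL)
		return -1;
	ifd.ifd_data = breqs;
	if (ioctl(s, SIOCGDRVSPEC, &ifd) == -1) {
		free(breqs);
		return -1;
	}
	n = ifd.ifd_len / sizeof(*breqs);
	for (i = 0; i < n; i++)
		printf("%s\n", breqs[i].ifbr_ifsname);
	free(breqs);
	return 0;
}
#endif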
1135
1136 static int
1137 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1138 {
1139 struct ifbaconf *bac = arg;
1140 struct bridge_rtnode *brt;
1141 struct ifbareq bareq;
1142 int count = 0, error = 0, len;
1143
1144 if (bac->ifbac_len == 0)
1145 return 0;
1146
1147 BRIDGE_RT_LOCK(sc);
1148
1149 	/* The passed buffer is too small; tell the caller the required size. */
1150 if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1151 count = sc->sc_brtcnt;
1152 goto out;
1153 }
1154
1155 len = bac->ifbac_len;
1156 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1157 if (len < sizeof(bareq))
1158 goto out;
1159 memset(&bareq, 0, sizeof(bareq));
1160 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1161 sizeof(bareq.ifba_ifsname));
1162 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1163 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1164 bareq.ifba_expire = brt->brt_expire - time_uptime;
1165 } else
1166 bareq.ifba_expire = 0;
1167 bareq.ifba_flags = brt->brt_flags;
1168
1169 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1170 if (error)
1171 goto out;
1172 count++;
1173 len -= sizeof(bareq);
1174 }
1175 out:
1176 BRIDGE_RT_UNLOCK(sc);
1177
1178 bac->ifbac_len = sizeof(bareq) * count;
1179 return error;
1180 }
1181
1182 static int
1183 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1184 {
1185 struct ifbareq *req = arg;
1186 struct bridge_iflist *bif;
1187 int error;
1188 struct psref psref;
1189
1190 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1191 if (bif == NULL)
1192 return ENOENT;
1193
1194 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1195 req->ifba_flags);
1196
1197 bridge_release_member(sc, bif, &psref);
1198
1199 return error;
1200 }
1201
1202 static int
1203 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1204 {
1205 struct ifbrparam *param = arg;
1206
1207 sc->sc_brttimeout = param->ifbrp_ctime;
1208
1209 return 0;
1210 }
1211
1212 static int
1213 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1214 {
1215 struct ifbrparam *param = arg;
1216
1217 param->ifbrp_ctime = sc->sc_brttimeout;
1218
1219 return 0;
1220 }
1221
1222 static int
1223 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1224 {
1225 struct ifbareq *req = arg;
1226
1227 return (bridge_rtdaddr(sc, req->ifba_dst));
1228 }
1229
1230 static int
1231 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1232 {
1233 struct ifbreq *req = arg;
1234
1235 bridge_rtflush(sc, req->ifbr_ifsflags);
1236
1237 return 0;
1238 }
1239
1240 static int
1241 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1242 {
1243 struct ifbrparam *param = arg;
1244
1245 param->ifbrp_prio = sc->sc_bridge_priority;
1246
1247 return 0;
1248 }
1249
1250 static int
1251 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1252 {
1253 struct ifbrparam *param = arg;
1254
1255 sc->sc_bridge_priority = param->ifbrp_prio;
1256
1257 if (sc->sc_if.if_flags & IFF_RUNNING)
1258 bstp_initialization(sc);
1259
1260 return 0;
1261 }
1262
1263 static int
1264 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1265 {
1266 struct ifbrparam *param = arg;
1267
1268 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1269
1270 return 0;
1271 }
1272
1273 static int
1274 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1275 {
1276 struct ifbrparam *param = arg;
1277
1278 if (param->ifbrp_hellotime == 0)
1279 return EINVAL;
1280 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1281
1282 if (sc->sc_if.if_flags & IFF_RUNNING)
1283 bstp_initialization(sc);
1284
1285 return 0;
1286 }
1287
1288 static int
1289 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1290 {
1291 struct ifbrparam *param = arg;
1292
1293 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1294
1295 return 0;
1296 }
1297
1298 static int
1299 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1300 {
1301 struct ifbrparam *param = arg;
1302
1303 if (param->ifbrp_fwddelay == 0)
1304 return EINVAL;
1305 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1306
1307 if (sc->sc_if.if_flags & IFF_RUNNING)
1308 bstp_initialization(sc);
1309
1310 return 0;
1311 }
1312
1313 static int
1314 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1315 {
1316 struct ifbrparam *param = arg;
1317
1318 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1319
1320 return 0;
1321 }
1322
1323 static int
1324 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1325 {
1326 struct ifbrparam *param = arg;
1327
1328 if (param->ifbrp_maxage == 0)
1329 return EINVAL;
1330 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1331
1332 if (sc->sc_if.if_flags & IFF_RUNNING)
1333 bstp_initialization(sc);
1334
1335 return 0;
1336 }
1337
1338 static int
1339 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1340 {
1341 struct ifbreq *req = arg;
1342 struct bridge_iflist *bif;
1343 struct psref psref;
1344
1345 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1346 if (bif == NULL)
1347 return ENOENT;
1348
1349 bif->bif_priority = req->ifbr_priority;
1350
1351 if (sc->sc_if.if_flags & IFF_RUNNING)
1352 bstp_initialization(sc);
1353
1354 bridge_release_member(sc, bif, &psref);
1355
1356 return 0;
1357 }
1358
1359 static int
1360 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1361 {
1362 struct ifbrparam *param = arg;
1363
1364 param->ifbrp_filter = sc->sc_filter_flags;
1365
1366 return 0;
1367 }
1368
1369 static int
1370 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1371 {
1372 struct ifbrparam *param = arg;
1373 uint32_t nflags, oflags;
1374
1375 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1376 return EINVAL;
1377
1378 nflags = param->ifbrp_filter;
1379 oflags = sc->sc_filter_flags;
1380
1381 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1382 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1383 sc->sc_if.if_pfil);
1384 }
1385 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1386 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1387 sc->sc_if.if_pfil);
1388 }
1389
1390 sc->sc_filter_flags = nflags;
1391
1392 return 0;
1393 }
1394
1395 static int
1396 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1397 {
1398 struct ifbreq *req = arg;
1399 struct bridge_iflist *bif;
1400 struct psref psref;
1401
1402 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1403 if (bif == NULL)
1404 return ENOENT;
1405
1406 bif->bif_path_cost = req->ifbr_path_cost;
1407
1408 if (sc->sc_if.if_flags & IFF_RUNNING)
1409 bstp_initialization(sc);
1410
1411 bridge_release_member(sc, bif, &psref);
1412
1413 return 0;
1414 }
1415
1416 /*
1417 * bridge_ifdetach:
1418 *
1419 * Detach an interface from a bridge. Called when a member
1420 * interface is detaching.
1421 */
1422 static void
1423 bridge_ifdetach(void *xifs)
1424 {
1425 struct ifnet *ifs;
1426 struct bridge_softc *sc;
1427 struct ifbreq breq;
1428
1429 ifs = (struct ifnet *)xifs;
1430 sc = ifs->if_bridge;
1431
1432 /* ioctl_lock should prevent this from happening */
1433 KASSERT(sc != NULL);
1434
1435 memset(&breq, 0, sizeof(breq));
1436 strlcpy(breq.ifbr_ifsname, ifs->if_xname, sizeof(breq.ifbr_ifsname));
1437
1438 (void) bridge_ioctl_del(sc, &breq);
1439 }
1440
1441 /*
1442 * bridge_init:
1443 *
1444 * Initialize a bridge interface.
1445 */
1446 static int
1447 bridge_init(struct ifnet *ifp)
1448 {
1449 struct bridge_softc *sc = ifp->if_softc;
1450
1451 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1452
1453 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1454 bridge_timer, sc);
1455 bstp_initialization(sc);
1456
1457 ifp->if_flags |= IFF_RUNNING;
1458 return 0;
1459 }
1460
1461 /*
1462 * bridge_stop:
1463 *
1464 * Stop the bridge interface.
1465 */
1466 static void
1467 bridge_stop(struct ifnet *ifp, int disable)
1468 {
1469 struct bridge_softc *sc = ifp->if_softc;
1470
1471 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1472 ifp->if_flags &= ~IFF_RUNNING;
1473
1474 callout_halt(&sc->sc_brcallout, NULL);
1475 workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1476 bstp_stop(sc);
1477 bridge_rtflush(sc, IFBF_FLUSHDYN);
1478 }
1479
1480 /*
1481 * bridge_enqueue:
1482 *
1483 * Enqueue a packet on a bridge member interface.
1484 */
1485 void
1486 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1487 int runfilt)
1488 {
1489 int len, error;
1490 short mflags;
1491
1492 if (runfilt) {
1493 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1494 dst_ifp, PFIL_OUT) != 0) {
1495 if (m != NULL)
1496 m_freem(m);
1497 return;
1498 }
1499 if (m == NULL)
1500 return;
1501 }
1502
1503 #ifdef ALTQ
1504 KERNEL_LOCK(1, NULL);
1505 /*
1506 * If ALTQ is enabled on the member interface, do
1507 * classification; the queueing discipline might
1508 * not require classification, but might require
1509 * the address family/header pointer in the pktattr.
1510 */
1511 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1512 /* XXX IFT_ETHER */
1513 altq_etherclassify(&dst_ifp->if_snd, m);
1514 }
1515 KERNEL_UNLOCK_ONE(NULL);
1516 #endif /* ALTQ */
1517
1518 if (vlan_has_tag(m) &&
1519 !vlan_is_hwtag_enabled(dst_ifp)) {
1520 (void)ether_inject_vlantag(&m, ETHERTYPE_VLAN,
1521 vlan_get_tag(m));
1522 if (m == NULL) {
1523 if_statinc(&sc->sc_if, if_oerrors);
1524 return;
1525 }
1526 }
1527
1528 len = m->m_pkthdr.len;
1529 mflags = m->m_flags;
1530
1531 error = if_transmit_lock(dst_ifp, m);
1532 if (error) {
1533 /* mbuf is already freed */
1534 if_statinc(&sc->sc_if, if_oerrors);
1535 return;
1536 }
1537
1538 net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1539 if_statinc_ref(nsr, if_opackets);
1540 if_statadd_ref(nsr, if_obytes, len);
1541 if (mflags & M_MCAST)
1542 if_statinc_ref(nsr, if_omcasts);
1543 IF_STAT_PUTREF(&sc->sc_if);
1544 }
1545
1546 /*
1547 * bridge_output:
1548 *
1549 * Send output from a bridge member interface. This
1550 * performs the bridging function for locally originated
1551 * packets.
1552 *
1553 * The mbuf has the Ethernet header already attached. We must
1554 * enqueue or free the mbuf before returning.
1555 */
1556 int
1557 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1558 const struct rtentry *rt)
1559 {
1560 struct ether_header *eh;
1561 struct ifnet *dst_if;
1562 struct bridge_softc *sc;
1563 struct mbuf *n;
1564 int s, bound;
1565
1566 /*
1567 	 * bridge_output() is called from ether_output(); furthermore, the
1568 	 * ifp argument doesn't point to bridge(4), so don't assert
1569 	 * IFEF_MPSAFE here.
1570 */
1571
1572 KASSERT(m->m_len >= ETHER_HDR_LEN);
1573
1574 eh = mtod(m, struct ether_header *);
1575 sc = ifp->if_bridge;
1576
1577 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1578 if (memcmp(etherbroadcastaddr,
1579 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1580 m->m_flags |= M_BCAST;
1581 else
1582 m->m_flags |= M_MCAST;
1583 }
1584
1585 /*
1586 * If bridge is down, but the original output interface is up,
1587 * go ahead and send out that interface. Otherwise, the packet
1588 * is dropped below.
1589 */
1590 if (__predict_false(sc == NULL) ||
1591 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1592 dst_if = ifp;
1593 goto unicast_asis;
1594 }
1595
1596 /*
1597 * If the packet is a multicast, or we don't know a better way to
1598 * get there, send to all interfaces.
1599 */
1600 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1601 dst_if = NULL;
1602 else
1603 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1604
1605 /*
1606 * In general, we need to handle TX offload in software before
1607 * enqueueing a packet. However, we can send it as is in the
1608 * cases of unicast via (1) the source interface, or (2) an
1609 * interface which supports the specified offload options.
1610 * For multicast or broadcast, send it as is only if (3) all
1611 * the member interfaces support the specified options.
1612 */
1613
1614 /*
1615 * Unicast via the source interface.
1616 */
1617 if (dst_if == ifp)
1618 goto unicast_asis;
1619
1620 /*
1621 * Unicast via other interface.
1622 */
1623 if (dst_if != NULL) {
1624 KASSERT(m->m_flags & M_PKTHDR);
1625 if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1626 m->m_pkthdr.csum_flags)) {
1627 /*
1628 * Unicast via an interface which supports the
1629 * specified offload options.
1630 */
1631 goto unicast_asis;
1632 }
1633
1634 /*
1635 * Handle TX offload in software. For TSO, a packet is
1636 * split into multiple chunks. Thus, the return value of
1637 		 * ether_sw_offload_tx() is an mbuf queue consisting of them.
1638 */
1639 m = ether_sw_offload_tx(ifp, m);
1640 if (m == NULL)
1641 return 0;
1642
1643 do {
1644 n = m->m_nextpkt;
1645 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1646 m_freem(m);
1647 else
1648 bridge_enqueue(sc, dst_if, m, 0);
1649 m = n;
1650 } while (m != NULL);
1651
1652 return 0;
1653 }
1654
1655 /*
1656 * Multicast or broadcast.
1657 */
1658 if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1659 m->m_pkthdr.csum_flags)) {
1660 /*
1661 * Specified TX offload options are supported by all
1662 * the member interfaces of this bridge.
1663 */
1664 m->m_nextpkt = NULL; /* XXX */
1665 } else {
1666 /*
1667 * Otherwise, handle TX offload in software.
1668 */
1669 m = ether_sw_offload_tx(ifp, m);
1670 if (m == NULL)
1671 return 0;
1672 }
1673
1674 /*
1675 	 * When pppoe is used over a bridge, bridge_output() can be called
1676 	 * in an lwp context by pppoe_timeout_wk().
1677 */
1678 bound = curlwp_bind();
1679 do {
1680 /* XXX Should call bridge_broadcast, but there are locking
1681 * issues which need resolving first. */
1682 struct bridge_iflist *bif;
1683 struct mbuf *mc;
1684 bool used = false;
1685
1686 n = m->m_nextpkt;
1687
1688 BRIDGE_PSZ_RENTER(s);
1689 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1690 struct psref psref;
1691
1692 bridge_acquire_member(sc, bif, &psref);
1693 BRIDGE_PSZ_REXIT(s);
1694
1695 dst_if = bif->bif_ifp;
1696 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1697 goto next;
1698
1699 /*
1700 * If this is not the original output interface,
1701 * and the interface is participating in spanning
1702 * tree, make sure the port is in a state that
1703 * allows forwarding.
1704 */
1705 if (dst_if != ifp &&
1706 (bif->bif_flags & IFBIF_STP) != 0) {
1707 switch (bif->bif_state) {
1708 case BSTP_IFSTATE_BLOCKING:
1709 case BSTP_IFSTATE_LISTENING:
1710 case BSTP_IFSTATE_DISABLED:
1711 goto next;
1712 }
1713 }
1714
1715 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1716 bif_next) == NULL &&
1717 ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1718 dst_if == ifp))
1719 {
1720 used = true;
1721 mc = m;
1722 } else {
1723 mc = m_copypacket(m, M_DONTWAIT);
1724 if (mc == NULL) {
1725 if_statinc(&sc->sc_if, if_oerrors);
1726 goto next;
1727 }
1728 }
1729
1730 bridge_enqueue(sc, dst_if, mc, 0);
1731
1732 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1733 dst_if != ifp)
1734 {
1735 if (PSLIST_READER_NEXT(bif,
1736 struct bridge_iflist, bif_next) == NULL)
1737 {
1738 used = true;
1739 mc = m;
1740 } else {
1741 mc = m_copypacket(m, M_DONTWAIT);
1742 if (mc == NULL) {
1743 if_statinc(&sc->sc_if,
1744 if_oerrors);
1745 goto next;
1746 }
1747 }
1748
1749 m_set_rcvif(mc, dst_if);
1750 mc->m_flags &= ~M_PROMISC;
1751
1752 const int _s = splsoftnet();
1753 KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1754 ether_input(dst_if, mc);
1755 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1756 splx(_s);
1757 }
1758
1759 next:
1760 BRIDGE_PSZ_RENTER(s);
1761 bridge_release_member(sc, bif, &psref);
1762
1763 /* Guarantee we don't re-enter the loop as we already
1764 * decided we're at the end. */
1765 if (used)
1766 break;
1767 }
1768 BRIDGE_PSZ_REXIT(s);
1769
1770 if (!used)
1771 m_freem(m);
1772
1773 m = n;
1774 } while (m != NULL);
1775 curlwp_bindx(bound);
1776
1777 return 0;
1778
1779 unicast_asis:
1780 /*
1781 * XXX Spanning tree consideration here?
1782 */
1783 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1784 m_freem(m);
1785 else
1786 bridge_enqueue(sc, dst_if, m, 0);
1787 return 0;
1788 }
1789
1790 /*
1791 * bridge_start:
1792 *
1793 * Start output on a bridge.
1794 *
1795 * NOTE: This routine should never be called in this implementation.
1796 */
1797 static void
1798 bridge_start(struct ifnet *ifp)
1799 {
1800
1801 printf("%s: bridge_start() called\n", ifp->if_xname);
1802 }
1803
1804 /*
1805 * bridge_forward:
1806 *
1807 * The forwarding function of the bridge.
1808 */
1809 static void
1810 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1811 {
1812 struct bridge_iflist *bif;
1813 struct ifnet *src_if, *dst_if;
1814 struct ether_header *eh;
1815 struct psref psref;
1816 struct psref psref_src;
1817 DECLARE_LOCK_VARIABLE;
1818 bool src_if_protected;
1819
1820 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1821 return;
1822
1823 src_if = m_get_rcvif_psref(m, &psref_src);
1824 if (src_if == NULL) {
1825 /* Interface is being destroyed? */
1826 m_freem(m);
1827 goto out;
1828 }
1829
1830 if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1831
1832 /*
1833 * Look up the bridge_iflist.
1834 */
1835 bif = bridge_lookup_member_if(sc, src_if, &psref);
1836 if (bif == NULL) {
1837 /* Interface is not a bridge member (anymore?) */
1838 m_freem(m);
1839 goto out;
1840 }
1841
1842 if (bif->bif_flags & IFBIF_STP) {
1843 switch (bif->bif_state) {
1844 case BSTP_IFSTATE_BLOCKING:
1845 case BSTP_IFSTATE_LISTENING:
1846 case BSTP_IFSTATE_DISABLED:
1847 m_freem(m);
1848 bridge_release_member(sc, bif, &psref);
1849 goto out;
1850 }
1851 }
1852
1853 eh = mtod(m, struct ether_header *);
1854
1855 /*
1856 * If the interface is learning, and the source
1857 * address is valid and not multicast, record
1858 * the address.
1859 */
1860 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1861 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1862 (eh->ether_shost[0] == 0 &&
1863 eh->ether_shost[1] == 0 &&
1864 eh->ether_shost[2] == 0 &&
1865 eh->ether_shost[3] == 0 &&
1866 eh->ether_shost[4] == 0 &&
1867 eh->ether_shost[5] == 0) == 0) {
1868 (void) bridge_rtupdate(sc, eh->ether_shost,
1869 src_if, 0, IFBAF_DYNAMIC);
1870 }
1871
1872 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1873 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1874 m_freem(m);
1875 bridge_release_member(sc, bif, &psref);
1876 goto out;
1877 }
1878
1879 src_if_protected = ((bif->bif_flags & IFBIF_PROTECTED) != 0);
1880
1881 bridge_release_member(sc, bif, &psref);
1882
1883 /*
1884 * At this point, the port either doesn't participate
1885 * in spanning tree or it is in the forwarding state.
1886 */
1887
1888 /*
1889 * If the packet is unicast, destined for someone on
1890 * "this" side of the bridge, drop it.
1891 */
1892 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1893 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1894 if (src_if == dst_if) {
1895 m_freem(m);
1896 goto out;
1897 }
1898 } else {
1899 /* ...forward it to all interfaces. */
1900 if_statinc(&sc->sc_if, if_imcasts);
1901 dst_if = NULL;
1902 }
1903
1904 if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1905 if (m != NULL)
1906 m_freem(m);
1907 goto out;
1908 }
1909 if (m == NULL)
1910 goto out;
1911
1912 if (dst_if == NULL) {
1913 bridge_broadcast(sc, src_if, src_if_protected, m);
1914 goto out;
1915 }
1916
1917 m_put_rcvif_psref(src_if, &psref_src);
1918 src_if = NULL;
1919
1920 /*
1921 * At this point, we're dealing with a unicast frame
1922 * going to a different interface.
1923 */
1924 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1925 m_freem(m);
1926 goto out;
1927 }
1928
1929 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1930 if (bif == NULL) {
1931 /* Not a member of the bridge (anymore?) */
1932 m_freem(m);
1933 goto out;
1934 }
1935
1936 if (bif->bif_flags & IFBIF_STP) {
1937 switch (bif->bif_state) {
1938 case BSTP_IFSTATE_DISABLED:
1939 case BSTP_IFSTATE_BLOCKING:
1940 m_freem(m);
1941 bridge_release_member(sc, bif, &psref);
1942 goto out;
1943 }
1944 }
1945
1946 if ((bif->bif_flags & IFBIF_PROTECTED) && src_if_protected) {
1947 m_freem(m);
1948 bridge_release_member(sc, bif, &psref);
1949 goto out;
1950 }
1951
1952 bridge_release_member(sc, bif, &psref);
1953
1954 /*
1955 * Before enqueueing this packet to the destination interface,
1956 * clear any in-bound checksum flags to prevent them from being
1957 * misused as out-bound flags.
1958 */
1959 m->m_pkthdr.csum_flags = 0;
1960
1961 ACQUIRE_GLOBAL_LOCKS();
1962 bridge_enqueue(sc, dst_if, m, 1);
1963 RELEASE_GLOBAL_LOCKS();
1964 out:
1965 if (src_if != NULL)
1966 m_put_rcvif_psref(src_if, &psref_src);
1967 return;
1968 }
1969
1970 static bool
1971 bstp_state_before_learning(struct bridge_iflist *bif)
1972 {
1973 if (bif->bif_flags & IFBIF_STP) {
1974 switch (bif->bif_state) {
1975 case BSTP_IFSTATE_BLOCKING:
1976 case BSTP_IFSTATE_LISTENING:
1977 case BSTP_IFSTATE_DISABLED:
1978 return true;
1979 }
1980 }
1981 return false;
1982 }
1983
1984 static bool
1985 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1986 {
1987 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1988
1989 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1990 #if NCARP > 0
1991 || (bif->bif_ifp->if_carp &&
1992 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1993 #endif /* NCARP > 0 */
1994 )
1995 return true;
1996
1997 return false;
1998 }
1999
2000 /*
2001 * bridge_input:
2002 *
2003 * Receive input from a member interface. Queue the packet for
2004 * bridging if it is not for us.
2005 */
2006 static void
2007 bridge_input(struct ifnet *ifp, struct mbuf *m)
2008 {
2009 struct bridge_softc *sc = ifp->if_bridge;
2010 struct bridge_iflist *bif;
2011 struct ether_header *eh;
2012 struct psref psref;
2013 int bound;
2014 DECLARE_LOCK_VARIABLE;
2015
2016 KASSERT(!cpu_intr_p());
2017
2018 if (__predict_false(sc == NULL) ||
2019 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
2020 ACQUIRE_GLOBAL_LOCKS();
2021 ether_input(ifp, m);
2022 RELEASE_GLOBAL_LOCKS();
2023 return;
2024 }
2025
2026 bound = curlwp_bind();
2027 bif = bridge_lookup_member_if(sc, ifp, &psref);
2028 if (bif == NULL) {
2029 curlwp_bindx(bound);
2030 ACQUIRE_GLOBAL_LOCKS();
2031 ether_input(ifp, m);
2032 RELEASE_GLOBAL_LOCKS();
2033 return;
2034 }
2035
2036 eh = mtod(m, struct ether_header *);
2037
2038 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
2039 if (memcmp(etherbroadcastaddr,
2040 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
2041 m->m_flags |= M_BCAST;
2042 else
2043 m->m_flags |= M_MCAST;
2044 }
2045
2046 /*
2047 * A 'fast' path for packets addressed to interfaces that are
2048 * part of this bridge.
2049 */
2050 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
2051 !bstp_state_before_learning(bif)) {
2052 struct bridge_iflist *_bif;
2053 struct ifnet *_ifp = NULL;
2054 int s;
2055 struct psref _psref;
2056
2057 BRIDGE_PSZ_RENTER(s);
2058 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
2059 /* It is destined for us. */
2060 if (bridge_ourether(_bif, eh, 0)) {
2061 bridge_acquire_member(sc, _bif, &_psref);
2062 BRIDGE_PSZ_REXIT(s);
2063 if (_bif->bif_flags & IFBIF_LEARNING)
2064 (void) bridge_rtupdate(sc,
2065 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2066 m_set_rcvif(m, _bif->bif_ifp);
2067 _ifp = _bif->bif_ifp;
2068 bridge_release_member(sc, _bif, &_psref);
2069 goto out;
2070 }
2071
2072 /* We just received a packet that we sent out. */
2073 if (bridge_ourether(_bif, eh, 1))
2074 break;
2075 }
2076 BRIDGE_PSZ_REXIT(s);
2077 out:
2078
2079 if (_bif != NULL) {
2080 bridge_release_member(sc, bif, &psref);
2081 curlwp_bindx(bound);
2082 if (_ifp != NULL) {
2083 m->m_flags &= ~M_PROMISC;
2084 ACQUIRE_GLOBAL_LOCKS();
2085 ether_input(_ifp, m);
2086 RELEASE_GLOBAL_LOCKS();
2087 } else
2088 m_freem(m);
2089 return;
2090 }
2091 }
2092
2093 /* Tap off 802.1D packets; they do not get forwarded. */
2094 if (bif->bif_flags & IFBIF_STP &&
2095 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2096 bstp_input(sc, bif, m);
2097 bridge_release_member(sc, bif, &psref);
2098 curlwp_bindx(bound);
2099 return;
2100 }
2101
2102 /*
2103 * A strict switch would discard the packet here, but historically we have
2104 * passed it to the local stack instead; this also avoids some obnoxious behaviour.
2105 */
2106 if (bstp_state_before_learning(bif)) {
2107 bridge_release_member(sc, bif, &psref);
2108 curlwp_bindx(bound);
2109 ACQUIRE_GLOBAL_LOCKS();
2110 ether_input(ifp, m);
2111 RELEASE_GLOBAL_LOCKS();
2112 return;
2113 }
2114
2115 bridge_release_member(sc, bif, &psref);
2116
2117 bridge_forward(sc, m);
2118
2119 curlwp_bindx(bound);
2120 }
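/*
 * Illustrative sketch (added commentary, not part of the original source):
 * bridge_input() first classifies the destination address, since only
 * unicast frames take the fast path above.  A minimal stand-alone version
 * of that classification, using the same tests as the code above, might
 * look like this (example_classify_dst is a hypothetical name):
 *
 *	enum dst_class { DST_UNICAST, DST_MULTICAST, DST_BROADCAST };
 *
 *	static enum dst_class
 *	example_classify_dst(const struct ether_header *eh)
 *	{
 *		if (!ETHER_IS_MULTICAST(eh->ether_dhost))
 *			return DST_UNICAST;
 *		if (memcmp(etherbroadcastaddr, eh->ether_dhost,
 *		    ETHER_ADDR_LEN) == 0)
 *			return DST_BROADCAST;
 *		return DST_MULTICAST;
 *	}
 */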
2121
2122 /*
2123 * bridge_broadcast:
2124 *
2125 * Send a frame to all interfaces that are members of
2126 * the bridge, except for the one on which the packet
2127 * arrived.
2128 */
2129 static void
2130 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2131 bool src_if_protected, struct mbuf *m)
2132 {
2133 struct bridge_iflist *bif;
2134 struct mbuf *mc;
2135 struct ifnet *dst_if;
2136 bool bmcast;
2137 int s;
2138 DECLARE_LOCK_VARIABLE;
2139
2140 bmcast = m->m_flags & (M_BCAST|M_MCAST);
2141
2142 BRIDGE_PSZ_RENTER(s);
2143 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2144 struct psref psref;
2145
2146 bridge_acquire_member(sc, bif, &psref);
2147 BRIDGE_PSZ_REXIT(s);
2148
2149 dst_if = bif->bif_ifp;
2150
2151 if (bif->bif_flags & IFBIF_STP) {
2152 switch (bif->bif_state) {
2153 case BSTP_IFSTATE_BLOCKING:
2154 case BSTP_IFSTATE_DISABLED:
2155 goto next;
2156 }
2157 }
2158
2159 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2160 goto next;
2161
2162 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2163 goto next;
2164
2165 if (dst_if != src_if) {
2166 if ((bif->bif_flags & IFBIF_PROTECTED) &&
2167 src_if_protected) {
2168 goto next;
2169 }
2170
2171 mc = m_copypacket(m, M_DONTWAIT);
2172 if (mc == NULL) {
2173 if_statinc(&sc->sc_if, if_oerrors);
2174 goto next;
2175 }
2176 /*
2177 * Before enqueueing this packet to the destination
2178 * interface, clear any in-bound checksum flags to
2179 * prevent them from being misused as out-bound flags.
2180 */
2181 mc->m_pkthdr.csum_flags = 0;
2182
2183 ACQUIRE_GLOBAL_LOCKS();
2184 bridge_enqueue(sc, dst_if, mc, 1);
2185 RELEASE_GLOBAL_LOCKS();
2186 }
2187
2188 if (bmcast) {
2189 mc = m_copypacket(m, M_DONTWAIT);
2190 if (mc == NULL) {
2191 if_statinc(&sc->sc_if, if_oerrors);
2192 goto next;
2193 }
2194 /*
2195 * Before enqueueing this packet to the destination
2196 * interface, clear any in-bound checksum flags to
2197 * prevent them from being misused as out-bound flags.
2198 */
2199 mc->m_pkthdr.csum_flags = 0;
2200
2201 m_set_rcvif(mc, dst_if);
2202 mc->m_flags &= ~M_PROMISC;
2203
2204 ACQUIRE_GLOBAL_LOCKS();
2205 ether_input(dst_if, mc);
2206 RELEASE_GLOBAL_LOCKS();
2207 }
2208 next:
2209 BRIDGE_PSZ_RENTER(s);
2210 bridge_release_member(sc, bif, &psref);
2211 }
2212 BRIDGE_PSZ_REXIT(s);
2213
2214 m_freem(m);
2215 }
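/*
 * Note (added commentary): bridge_broadcast() never transmits the mbuf it
 * was handed.  Each eligible member gets its own copy via m_copypacket(),
 * broadcast/multicast frames additionally have a copy passed to
 * ether_input() on every member that accepts them, and the original mbuf
 * is always freed at the end of the function.
 */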
2216
2217 static int
2218 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2219 struct bridge_rtnode **brtp)
2220 {
2221 struct bridge_rtnode *brt;
2222 int error;
2223
2224 if (sc->sc_brtcnt >= sc->sc_brtmax)
2225 return ENOSPC;
2226
2227 /*
2228 * Allocate a new bridge forwarding node, and
2229 * initialize the expiration time and Ethernet
2230 * address.
2231 */
2232 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2233 if (brt == NULL)
2234 return ENOMEM;
2235
2236 memset(brt, 0, sizeof(*brt));
2237 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2238 brt->brt_flags = IFBAF_DYNAMIC;
2239 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2240 PSLIST_ENTRY_INIT(brt, brt_list);
2241 PSLIST_ENTRY_INIT(brt, brt_hash);
2242
2243 BRIDGE_RT_LOCK(sc);
2244 error = bridge_rtnode_insert(sc, brt);
2245 BRIDGE_RT_UNLOCK(sc);
2246
2247 if (error != 0) {
2248 pool_put(&bridge_rtnode_pool, brt);
2249 return error;
2250 }
2251
2252 *brtp = brt;
2253 return 0;
2254 }
2255
2256 /*
2257 * bridge_rtupdate:
2258 *
2259 * Add a bridge routing entry.
2260 */
2261 static int
2262 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2263 struct ifnet *dst_if, int setflags, uint8_t flags)
2264 {
2265 struct bridge_rtnode *brt;
2266 int s;
2267
2268 again:
2269 /*
2270 * A route for this destination might already exist. If so,
2271 * update it; otherwise, create a new one.
2272 */
2273 BRIDGE_RT_RENTER(s);
2274 brt = bridge_rtnode_lookup(sc, dst);
2275
2276 if (brt != NULL) {
2277 brt->brt_ifp = dst_if;
2278 if (setflags) {
2279 brt->brt_flags = flags;
2280 if (flags & IFBAF_STATIC)
2281 brt->brt_expire = 0;
2282 else
2283 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2284 } else {
2285 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2286 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2287 }
2288 }
2289 BRIDGE_RT_REXIT(s);
2290
2291 if (brt == NULL) {
2292 int r;
2293
2294 r = bridge_rtalloc(sc, dst, &brt);
2295 if (r != 0)
2296 return r;
2297 goto again;
2298 }
2299
2300 return 0;
2301 }
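/*
 * Usage sketch (added commentary, not part of the original source): the
 * learning path calls bridge_rtupdate() with setflags == 0 so an existing
 * entry only has its expiry refreshed, while administrative updates pass
 * setflags != 0 to pin the entry's flags.  For example (variable names are
 * hypothetical):
 *
 *	Learn a dynamic entry for a source address seen on ifp:
 *		(void)bridge_rtupdate(sc, eh->ether_shost, ifp, 0,
 *		    IFBAF_DYNAMIC);
 *
 *	Install a static entry that never expires (brt_expire = 0):
 *		(void)bridge_rtupdate(sc, lladdr, dst_ifp, 1, IFBAF_STATIC);
 */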
2302
2303 /*
2304 * bridge_rtlookup:
2305 *
2306 * Lookup the destination interface for an address.
2307 */
2308 static struct ifnet *
2309 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2310 {
2311 struct bridge_rtnode *brt;
2312 struct ifnet *ifs = NULL;
2313 int s;
2314
2315 BRIDGE_RT_RENTER(s);
2316 brt = bridge_rtnode_lookup(sc, addr);
2317 if (brt != NULL)
2318 ifs = brt->brt_ifp;
2319 BRIDGE_RT_REXIT(s);
2320
2321 return ifs;
2322 }
2323
2324 typedef bool (*bridge_iterate_cb_t)
2325 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2326
2327 /*
2328 * bridge_rtlist_iterate_remove:
2329 *
2330 * Iterate over sc->sc_rtlist and remove the rtnodes that the func
2331 * callback selects for removal.  Removals are performed in a
2332 * pserialize-safe manner; to that end, all kmem_* operations are done
2333 * outside the mutex.
2334 */
2335 static void
2336 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2337 {
2338 struct bridge_rtnode *brt;
2339 struct bridge_rtnode **brt_list;
2340 int i, count;
2341
2342 retry:
2343 count = sc->sc_brtcnt;
2344 if (count == 0)
2345 return;
2346 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2347
2348 BRIDGE_RT_LOCK(sc);
2349 if (__predict_false(sc->sc_brtcnt > count)) {
2350 /* The number of rtnodes increased; we need more memory */
2351 BRIDGE_RT_UNLOCK(sc);
2352 kmem_free(brt_list, sizeof(*brt_list) * count);
2353 goto retry;
2354 }
2355
2356 i = 0;
2357 /*
2358 * We don't need to use a _SAFE variant here because we know
2359 * that a removed item keeps its next pointer as-is thanks to
2360 * pslist(9) and isn't freed in the loop.
2361 */
2362 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2363 bool need_break = false;
2364 if (func(sc, brt, &need_break, arg)) {
2365 bridge_rtnode_remove(sc, brt);
2366 brt_list[i++] = brt;
2367 }
2368 if (need_break)
2369 break;
2370 }
2371
2372 if (i > 0)
2373 BRIDGE_RT_PSZ_PERFORM(sc);
2374 BRIDGE_RT_UNLOCK(sc);
2375
2376 while (--i >= 0)
2377 bridge_rtnode_destroy(brt_list[i]);
2378
2379 kmem_free(brt_list, sizeof(*brt_list) * count);
2380 }
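/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a callback for bridge_rtlist_iterate_remove() matches bridge_iterate_cb_t,
 * returns true for every rtnode that should be removed, and may set
 * *need_break to stop the walk early.  A hypothetical callback that drops
 * all entries learned on a given interface could look like this:
 *
 *	static bool
 *	example_rtdelete_if_cb(struct bridge_softc *sc,
 *	    struct bridge_rtnode *brt, bool *need_break, void *arg)
 *	{
 *		const struct ifnet *ifp = arg;
 *
 *		return brt->brt_ifp == ifp;
 *	}
 *
 * It would be invoked as
 * bridge_rtlist_iterate_remove(sc, example_rtdelete_if_cb, ifp).
 */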
2381
2382 static bool
2383 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2384 bool *need_break, void *arg)
2385 {
2386 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2387 /* Take the subsequent removal into account */
2388 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2389 *need_break = true;
2390 return true;
2391 } else
2392 return false;
2393 }
2394
2395 static void
2396 bridge_rttrim0(struct bridge_softc *sc)
2397 {
2398 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2399 }
2400
2401 /*
2402 * bridge_rttrim:
2403 *
2404 * Trim the routing table so that the number of routing
2405 * entries is less than or equal to the configured
2406 * maximum.
2407 */
2408 static void
2409 bridge_rttrim(struct bridge_softc *sc)
2410 {
2411
2412 /* Make sure we actually need to do this. */
2413 if (sc->sc_brtcnt <= sc->sc_brtmax)
2414 return;
2415
2416 /* Force an aging cycle; this might trim enough addresses. */
2417 bridge_rtage(sc);
2418 if (sc->sc_brtcnt <= sc->sc_brtmax)
2419 return;
2420
2421 bridge_rttrim0(sc);
2422
2423 return;
2424 }
2425
2426 /*
2427 * bridge_timer:
2428 *
2429 * Aging timer for the bridge.
2430 */
2431 static void
2432 bridge_timer(void *arg)
2433 {
2434 struct bridge_softc *sc = arg;
2435
2436 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2437 }
2438
2439 static void
2440 bridge_rtage_work(struct work *wk, void *arg)
2441 {
2442 struct bridge_softc *sc = arg;
2443
2444 KASSERT(wk == &sc->sc_rtage_wk);
2445
2446 bridge_rtage(sc);
2447
2448 if (sc->sc_if.if_flags & IFF_RUNNING)
2449 callout_reset(&sc->sc_brcallout,
2450 bridge_rtable_prune_period * hz, bridge_timer, sc);
2451 }
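/*
 * Note (added commentary): the aging callout never ages the table directly.
 * bridge_timer() only enqueues sc_rtage_wk on sc_rtage_wq, and
 * bridge_rtage_work() then runs bridge_rtage() from thread context,
 * presumably because the removal path calls pserialize_perform(), which
 * must be called from thread context rather than from the softint context
 * a callout handler runs in.  The work handler re-arms the callout while
 * the bridge is IFF_RUNNING, so aging repeats every
 * bridge_rtable_prune_period seconds.
 */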
2452
2453 static bool
2454 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2455 bool *need_break, void *arg)
2456 {
2457 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2458 time_uptime >= brt->brt_expire)
2459 return true;
2460 else
2461 return false;
2462 }
2463
2464 /*
2465 * bridge_rtage:
2466 *
2467 * Perform an aging cycle.
2468 */
2469 static void
2470 bridge_rtage(struct bridge_softc *sc)
2471 {
2472 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2473 }
2474
2475
2476 static bool
2477 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2478 bool *need_break, void *arg)
2479 {
2480 int full = *(int*)arg;
2481
2482 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2483 return true;
2484 else
2485 return false;
2486 }
2487
2488 /*
2489 * bridge_rtflush:
2490 *
2491 * Remove all dynamic addresses from the bridge.
2492 */
2493 static void
2494 bridge_rtflush(struct bridge_softc *sc, int full)
2495 {
2496 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2497 }
2498
2499 /*
2500 * bridge_rtdaddr:
2501 *
2502 * Remove an address from the table.
2503 */
2504 static int
2505 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2506 {
2507 struct bridge_rtnode *brt;
2508
2509 BRIDGE_RT_LOCK(sc);
2510 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2511 BRIDGE_RT_UNLOCK(sc);
2512 return ENOENT;
2513 }
2514 bridge_rtnode_remove(sc, brt);
2515 BRIDGE_RT_PSZ_PERFORM(sc);
2516 BRIDGE_RT_UNLOCK(sc);
2517
2518 bridge_rtnode_destroy(brt);
2519
2520 return 0;
2521 }
2522
2523 /*
2524 * bridge_rtdelete:
2525 *
2526 * Delete routes to a specific member interface.
2527 */
2528 static void
2529 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2530 {
2531 struct bridge_rtnode *brt;
2532
2533 /* XXX pserialize_perform for each entry is slow */
2534 again:
2535 BRIDGE_RT_LOCK(sc);
2536 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2537 if (brt->brt_ifp == ifp)
2538 break;
2539 }
2540 if (brt == NULL) {
2541 BRIDGE_RT_UNLOCK(sc);
2542 return;
2543 }
2544 bridge_rtnode_remove(sc, brt);
2545 BRIDGE_RT_PSZ_PERFORM(sc);
2546 BRIDGE_RT_UNLOCK(sc);
2547
2548 bridge_rtnode_destroy(brt);
2549
2550 goto again;
2551 }
2552
2553 /*
2554 * bridge_rtable_init:
2555 *
2556 * Initialize the route table for this bridge.
2557 */
2558 static void
2559 bridge_rtable_init(struct bridge_softc *sc)
2560 {
2561 int i;
2562
2563 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2564 KM_SLEEP);
2565
2566 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2567 PSLIST_INIT(&sc->sc_rthash[i]);
2568
2569 sc->sc_rthash_key = cprng_fast32();
2570
2571 PSLIST_INIT(&sc->sc_rtlist);
2572
2573 sc->sc_rtlist_psz = pserialize_create();
2574 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2575 }
2576
2577 /*
2578 * bridge_rtable_fini:
2579 *
2580 * Deconstruct the route table for this bridge.
2581 */
2582 static void
2583 bridge_rtable_fini(struct bridge_softc *sc)
2584 {
2585
2586 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2587 mutex_obj_free(sc->sc_rtlist_lock);
2588 pserialize_destroy(sc->sc_rtlist_psz);
2589 }
2590
2591 /*
2592 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2593 * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2594 */
2595 #define mix(a, b, c) \
2596 do { \
2597 a -= b; a -= c; a ^= (c >> 13); \
2598 b -= c; b -= a; b ^= (a << 8); \
2599 c -= a; c -= b; c ^= (b >> 13); \
2600 a -= b; a -= c; a ^= (c >> 12); \
2601 b -= c; b -= a; b ^= (a << 16); \
2602 c -= a; c -= b; c ^= (b >> 5); \
2603 a -= b; a -= c; a ^= (c >> 3); \
2604 b -= c; b -= a; b ^= (a << 10); \
2605 c -= a; c -= b; c ^= (b >> 15); \
2606 } while (/*CONSTCOND*/0)
2607
2608 static inline uint32_t
2609 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2610 {
2611 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2612
2613 b += addr[5] << 8;
2614 b += addr[4];
2615 a += (uint32_t)addr[3] << 24;
2616 a += addr[2] << 16;
2617 a += addr[1] << 8;
2618 a += addr[0];
2619
2620 mix(a, b, c);
2621
2622 return (c & BRIDGE_RTHASH_MASK);
2623 }
2624
2625 #undef mix
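/*
 * Illustrative sketch (added commentary, not part of the original source):
 * bridge_rthash() seeds the Jenkins mix with the golden-ratio constant
 * 0x9e3779b9 and the per-bridge random key sc_rthash_key, folds in the six
 * address bytes, and masks the result down to a bucket index.  A
 * stand-alone equivalent with the mix macro expanded (example_rthash is a
 * hypothetical name), which may help when reasoning about bucket
 * distribution, might be:
 *
 *	static uint32_t
 *	example_rthash(const uint8_t addr[6], uint32_t key, uint32_t mask)
 *	{
 *		uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;
 *
 *		b += addr[5] << 8;
 *		b += addr[4];
 *		a += (uint32_t)addr[3] << 24;
 *		a += addr[2] << 16;
 *		a += addr[1] << 8;
 *		a += addr[0];
 *
 *		a -= b; a -= c; a ^= (c >> 13);
 *		b -= c; b -= a; b ^= (a << 8);
 *		c -= a; c -= b; c ^= (b >> 13);
 *		a -= b; a -= c; a ^= (c >> 12);
 *		b -= c; b -= a; b ^= (a << 16);
 *		c -= a; c -= b; c ^= (b >> 5);
 *		a -= b; a -= c; a ^= (c >> 3);
 *		b -= c; b -= a; b ^= (a << 10);
 *		c -= a; c -= b; c ^= (b >> 15);
 *
 *		return c & mask;
 *	}
 */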
2626
2627 /*
2628 * bridge_rtnode_lookup:
2629 *
2630 * Look up a bridge route node for the specified destination.
2631 */
2632 static struct bridge_rtnode *
2633 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2634 {
2635 struct bridge_rtnode *brt;
2636 uint32_t hash;
2637 int dir;
2638
2639 hash = bridge_rthash(sc, addr);
2640 BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2641 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2642 if (dir == 0)
2643 return brt;
2644 if (dir > 0)
2645 return NULL;
2646 }
2647
2648 return NULL;
2649 }
2650
2651 /*
2652 * bridge_rtnode_insert:
2653 *
2654 * Insert the specified bridge node into the route table.
2655 * Returns EEXIST if an entry for the address is already present.
2656 */
2657 static int
2658 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2659 {
2660 struct bridge_rtnode *lbrt, *prev = NULL;
2661 uint32_t hash;
2662
2663 KASSERT(BRIDGE_RT_LOCKED(sc));
2664
2665 hash = bridge_rthash(sc, brt->brt_addr);
2666 BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2667 int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2668 if (dir == 0)
2669 return EEXIST;
2670 if (dir > 0)
2671 break;
2672 prev = lbrt;
2673 }
2674 if (prev == NULL)
2675 BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2676 else
2677 BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2678
2679 BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2680 sc->sc_brtcnt++;
2681
2682 return 0;
2683 }
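/*
 * Note (added commentary): each hash chain is kept sorted in descending
 * memcmp() order of brt_addr.  bridge_rtnode_insert() walks the chain
 * until it finds the first node whose address compares less than the new
 * one and inserts in front of it, and bridge_rtnode_lookup() relies on the
 * same invariant to stop early (dir > 0) as soon as the remaining nodes
 * can no longer match.
 */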
2684
2685 /*
2686 * bridge_rtnode_remove:
2687 *
2688 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2689 */
2690 static void
2691 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2692 {
2693
2694 KASSERT(BRIDGE_RT_LOCKED(sc));
2695
2696 BRIDGE_RTHASH_WRITER_REMOVE(brt);
2697 BRIDGE_RTLIST_WRITER_REMOVE(brt);
2698 sc->sc_brtcnt--;
2699 }
2700
2701 /*
2702 * bridge_rtnode_destroy:
2703 *
2704 * Destroy a bridge rtnode.
2705 */
2706 static void
2707 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2708 {
2709
2710 PSLIST_ENTRY_DESTROY(brt, brt_list);
2711 PSLIST_ENTRY_DESTROY(brt, brt_hash);
2712 pool_put(&bridge_rtnode_pool, brt);
2713 }
2714
2715 extern pfil_head_t *inet_pfil_hook; /* XXX */
2716 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2717
2718 /*
2719 * Send bridge packets through IPF if they are one of the types IPF can deal
2720 * with.  ARP and REVARP are passed automatically without being handed to
2721 * IPF, which would pass them without question anyway.
2722 */
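/*
 * Added commentary: the pfil hooks run on a bare IP (or IPv6) packet, so
 * bridge_ipf() peels the link-layer encapsulation off the mbuf before
 * filtering and restores it afterwards, roughly:
 *
 *	[ether header][optional LLC/SNAP][IP packet]
 *	    -> m_adj() strips the headers (copies are kept in eh2/llc1)
 *	    -> bridge_ip{,6}_checkbasic() + pfil_run_hooks()
 *	    -> M_PREPEND() + bcopy() put the headers back
 *
 * If the filter rejects the packet, the mbuf is freed and *mp is set to
 * NULL.
 */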
2723 static int
2724 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2725 {
2726 int snap, error;
2727 struct ether_header *eh1, eh2;
2728 struct llc llc1;
2729 uint16_t ether_type;
2730
2731 snap = 0;
2732 error = -1; /* Default to an error; success paths return 0 */
2733 eh1 = mtod(*mp, struct ether_header *);
2734 ether_type = ntohs(eh1->ether_type);
2735
2736 /*
2737 * Check for SNAP/LLC.
2738 */
2739 if (ether_type < ETHERMTU) {
2740 struct llc *llc2 = (struct llc *)(eh1 + 1);
2741
2742 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2743 llc2->llc_dsap == LLC_SNAP_LSAP &&
2744 llc2->llc_ssap == LLC_SNAP_LSAP &&
2745 llc2->llc_control == LLC_UI) {
2746 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2747 snap = 1;
2748 }
2749 }
2750
2751 /* drop VLAN traffic untagged by hardware offloading */
2752 if (vlan_has_tag(*mp))
2753 goto bad;
2754
2755 /*
2756 * If we're trying to filter bridge traffic, don't look at anything
2757 * other than IP and ARP traffic. If the filter doesn't understand
2758 * IPv6, don't allow IPv6 through the bridge either. This is lame
2759 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2760 * but of course we don't have an AppleTalk filter to begin with.
2761 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2762 * ARP traffic.)
2763 */
2764 switch (ether_type) {
2765 case ETHERTYPE_ARP:
2766 case ETHERTYPE_REVARP:
2767 return 0; /* Automatically pass */
2768 case ETHERTYPE_IP:
2769 # ifdef INET6
2770 case ETHERTYPE_IPV6:
2771 # endif /* INET6 */
2772 break;
2773 default:
2774 goto bad;
2775 }
2776
2777 /* Strip off the Ethernet header and keep a copy. */
2778 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2779 m_adj(*mp, ETHER_HDR_LEN);
2780
2781 /* Strip off snap header, if present */
2782 if (snap) {
2783 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2784 m_adj(*mp, sizeof(struct llc));
2785 }
2786
2787 /*
2788 * Check basic packet sanity and run IPF through pfil.
2789 */
2790 KASSERT(!cpu_intr_p());
2791 switch (ether_type) {
2793 case ETHERTYPE_IP :
2794 error = bridge_ip_checkbasic(mp);
2795 if (error == 0)
2796 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2797 break;
2798 # ifdef INET6
2799 case ETHERTYPE_IPV6 :
2800 error = bridge_ip6_checkbasic(mp);
2801 if (error == 0)
2802 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2803 break;
2804 # endif
2805 default :
2806 error = 0;
2807 break;
2808 }
2809
2810 if (*mp == NULL)
2811 return error;
2812 if (error != 0)
2813 goto bad;
2814
2815 error = -1;
2816
2817 /*
2818 * Finally, put everything back the way it was and return
2819 */
2820 if (snap) {
2821 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2822 if (*mp == NULL)
2823 return error;
2824 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2825 }
2826
2827 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2828 if (*mp == NULL)
2829 return error;
2830 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2831
2832 return 0;
2833
2834 bad:
2835 m_freem(*mp);
2836 *mp = NULL;
2837 return error;
2838 }
2839
2840 /*
2841 * Perform basic checks on header size since
2842 * IPF assumes ip_input has already processed
2843 * it on its behalf. Cut-and-pasted from ip_input.c.
2844 * Given how simple the IPv6 version is,
2845 * does the IPv4 version really need to be
2846 * this complicated?
2847 *
2848 * XXX Should we update ipstat here, or not?
2849 * XXX Right now we update ipstat but not
2850 * XXX csum_counter.
2851 */
2852 static int
2853 bridge_ip_checkbasic(struct mbuf **mp)
2854 {
2855 struct mbuf *m = *mp;
2856 struct ip *ip;
2857 int len, hlen;
2858
2859 if (*mp == NULL)
2860 return -1;
2861
2862 if (M_GET_ALIGNED_HDR(&m, struct ip, true) != 0) {
2863 /* XXXJRT new stat, please */
2864 ip_statinc(IP_STAT_TOOSMALL);
2865 goto bad;
2866 }
2867 ip = mtod(m, struct ip *);
2868 if (ip == NULL) goto bad;
2869
2870 if (ip->ip_v != IPVERSION) {
2871 ip_statinc(IP_STAT_BADVERS);
2872 goto bad;
2873 }
2874 hlen = ip->ip_hl << 2;
2875 if (hlen < sizeof(struct ip)) { /* minimum header length */
2876 ip_statinc(IP_STAT_BADHLEN);
2877 goto bad;
2878 }
2879 if (hlen > m->m_len) {
2880 if ((m = m_pullup(m, hlen)) == NULL) {
2881 ip_statinc(IP_STAT_BADHLEN);
2882 goto bad;
2883 }
2884 ip = mtod(m, struct ip *);
2885 if (ip == NULL) goto bad;
2886 }
2887
2888 switch (m->m_pkthdr.csum_flags &
2889 ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2890 M_CSUM_IPv4_BAD)) {
2891 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2892 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2893 goto bad;
2894
2895 case M_CSUM_IPv4:
2896 /* Checksum was okay. */
2897 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2898 break;
2899
2900 default:
2901 /* Must compute it ourselves. */
2902 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2903 if (in_cksum(m, hlen) != 0)
2904 goto bad;
2905 break;
2906 }
2907
2908 /* Retrieve the packet length. */
2909 len = ntohs(ip->ip_len);
2910
2911 /*
2912 * Check for additional length bogosity
2913 */
2914 if (len < hlen) {
2915 ip_statinc(IP_STAT_BADLEN);
2916 goto bad;
2917 }
2918
2919 /*
2920 * Check that the amount of data in the buffers
2921 * is at least as much as the IP header would have us expect.
2922 * Drop the packet if it is shorter than we expect.
2923 */
2924 if (m->m_pkthdr.len < len) {
2925 ip_statinc(IP_STAT_TOOSHORT);
2926 goto bad;
2927 }
2928
2929 /* Checks out, proceed */
2930 *mp = m;
2931 return 0;
2932
2933 bad:
2934 *mp = m;
2935 return -1;
2936 }
2937
2938 # ifdef INET6
2939 /*
2940 * Same as above, but for IPv6.
2941 * Cut-and-pasted from ip6_input.c.
2942 * XXX Should we update ip6stat, or not?
2943 */
2944 static int
2945 bridge_ip6_checkbasic(struct mbuf **mp)
2946 {
2947 struct mbuf *m = *mp;
2948 struct ip6_hdr *ip6;
2949
2950 /*
2951 * If the IPv6 header is not aligned, slurp it up into a new
2952 * mbuf with space for link headers, in the event we forward
2953 * it. Otherwise, if it is aligned, make sure the entire base
2954 * IPv6 header is in the first mbuf of the chain.
2955 */
2956 if (M_GET_ALIGNED_HDR(&m, struct ip6_hdr, true) != 0) {
2957 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2958 /* XXXJRT new stat, please */
2959 ip6_statinc(IP6_STAT_TOOSMALL);
2960 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2961 goto bad;
2962 }
2963
2964 ip6 = mtod(m, struct ip6_hdr *);
2965
2966 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2967 ip6_statinc(IP6_STAT_BADVERS);
2968 in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2969 goto bad;
2970 }
2971
2972 /* Checks out, proceed */
2973 *mp = m;
2974 return 0;
2975
2976 bad:
2977 *mp = m;
2978 return -1;
2979 }
2980 # endif /* INET6 */
2981