if_bridge.c revision 1.179 1 /* $NetBSD: if_bridge.c,v 1.179 2021/02/19 14:51:59 christos Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79 * consider heterogenous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.179 2021/02/19 14:51:59 christos Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_inet.h"
87 #include "opt_net_mpsafe.h"
88 #endif /* _KERNEL_OPT */
89
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105
106 #include <net/bpf.h>
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_llc.h>
111
112 #include <net/if_ether.h>
113 #include <net/if_bridgevar.h>
114 #include <net/ether_sw_offload.h>
115
116 /* Used for bridge_ip[6]_checkbasic */
117 #include <netinet/in.h>
118 #include <netinet/in_systm.h>
119 #include <netinet/ip.h>
120 #include <netinet/ip_var.h>
121 #include <netinet/ip_private.h> /* XXX */
122 #include <netinet/ip6.h>
123 #include <netinet6/in6_var.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/ip6_private.h> /* XXX */
126
127 /*
128 * Size of the route hash table. Must be a power of two.
129 */
130 #ifndef BRIDGE_RTHASH_SIZE
131 #define BRIDGE_RTHASH_SIZE 1024
132 #endif
133
134 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
135
136 #include "carp.h"
137 #if NCARP > 0
138 #include <netinet/in.h>
139 #include <netinet/in_var.h>
140 #include <netinet/ip_carp.h>
141 #endif
142
143 #include "ioconf.h"
144
145 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
146 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
148
149 /*
150 * Maximum number of addresses to cache.
151 */
152 #ifndef BRIDGE_RTABLE_MAX
153 #define BRIDGE_RTABLE_MAX 100
154 #endif
155
156 /*
157 * Spanning tree defaults.
158 */
159 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
160 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
161 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
162 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
163 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
164 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
165 #define BSTP_DEFAULT_PATH_COST 55
166
167 /*
168 * Timeout (in seconds) for entries learned dynamically.
169 */
170 #ifndef BRIDGE_RTABLE_TIMEOUT
171 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
172 #endif
173
174 /*
175 * Number of seconds between walks of the route list.
176 */
177 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
178 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
179 #endif
180
181 #define BRIDGE_RT_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_lock)
182 #define BRIDGE_RT_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_lock)
183 #define BRIDGE_RT_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_lock)
184
185 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
186 pserialize_perform((_sc)->sc_rtlist_psz)
187
188 #define BRIDGE_RT_RENTER(__s) do { __s = pserialize_read_enter(); } while (0)
189 #define BRIDGE_RT_REXIT(__s) do { pserialize_read_exit(__s); } while (0)
190
191 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc) \
192 PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist), \
193 struct bridge_rtnode, brt_list)
194 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc) \
195 PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist), \
196 struct bridge_rtnode, brt_list)
197 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt) \
198 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
199 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt) \
200 PSLIST_WRITER_REMOVE((_brt), brt_list)
201
202 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash) \
203 PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
204 struct bridge_rtnode, brt_hash)
205 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash) \
206 PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
207 struct bridge_rtnode, brt_hash)
208 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt) \
209 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
210 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new) \
211 PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
212 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt) \
213 PSLIST_WRITER_REMOVE((_brt), brt_hash)
214
215 #ifdef NET_MPSAFE
216 #define DECLARE_LOCK_VARIABLE
217 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
218 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
219 #else
220 #define DECLARE_LOCK_VARIABLE int __s
221 #define ACQUIRE_GLOBAL_LOCKS() do { \
222 KERNEL_LOCK(1, NULL); \
223 mutex_enter(softnet_lock); \
224 __s = splsoftnet(); \
225 } while (0)
226 #define RELEASE_GLOBAL_LOCKS() do { \
227 splx(__s); \
228 mutex_exit(softnet_lock); \
229 KERNEL_UNLOCK_ONE(NULL); \
230 } while (0)
231 #endif
232
233 struct psref_class *bridge_psref_class __read_mostly;
234
235 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
236
237 static struct pool bridge_rtnode_pool;
238
239 static int bridge_clone_create(struct if_clone *, int);
240 static int bridge_clone_destroy(struct ifnet *);
241
242 static int bridge_ioctl(struct ifnet *, u_long, void *);
243 static int bridge_init(struct ifnet *);
244 static void bridge_stop(struct ifnet *, int);
245 static void bridge_start(struct ifnet *);
246
247 static void bridge_input(struct ifnet *, struct mbuf *);
248 static void bridge_forward(struct bridge_softc *, struct mbuf *);
249
250 static void bridge_timer(void *);
251
252 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
253 struct mbuf *);
254
255 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
256 struct ifnet *, int, uint8_t);
257 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
258 static void bridge_rttrim(struct bridge_softc *);
259 static void bridge_rtage(struct bridge_softc *);
260 static void bridge_rtage_work(struct work *, void *);
261 static void bridge_rtflush(struct bridge_softc *, int);
262 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
263 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
264
265 static void bridge_rtable_init(struct bridge_softc *);
266 static void bridge_rtable_fini(struct bridge_softc *);
267
268 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
269 const uint8_t *);
270 static int bridge_rtnode_insert(struct bridge_softc *,
271 struct bridge_rtnode *);
272 static void bridge_rtnode_remove(struct bridge_softc *,
273 struct bridge_rtnode *);
274 static void bridge_rtnode_destroy(struct bridge_rtnode *);
275
276 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
277 const char *name,
278 struct psref *);
279 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
280 struct ifnet *ifp,
281 struct psref *);
282 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
283 struct psref *);
284 static void bridge_delete_member(struct bridge_softc *,
285 struct bridge_iflist *);
286 static void bridge_acquire_member(struct bridge_softc *sc,
287 struct bridge_iflist *,
288 struct psref *);
289
290 static int bridge_ioctl_add(struct bridge_softc *, void *);
291 static int bridge_ioctl_del(struct bridge_softc *, void *);
292 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
293 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
294 static int bridge_ioctl_scache(struct bridge_softc *, void *);
295 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
296 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
297 static int bridge_ioctl_rts(struct bridge_softc *, void *);
298 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
299 static int bridge_ioctl_sto(struct bridge_softc *, void *);
300 static int bridge_ioctl_gto(struct bridge_softc *, void *);
301 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
302 static int bridge_ioctl_flush(struct bridge_softc *, void *);
303 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
304 static int bridge_ioctl_spri(struct bridge_softc *, void *);
305 static int bridge_ioctl_ght(struct bridge_softc *, void *);
306 static int bridge_ioctl_sht(struct bridge_softc *, void *);
307 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
308 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
309 static int bridge_ioctl_gma(struct bridge_softc *, void *);
310 static int bridge_ioctl_sma(struct bridge_softc *, void *);
311 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
312 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
313 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
314 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
315 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
316 static int bridge_ip_checkbasic(struct mbuf **mp);
317 # ifdef INET6
318 static int bridge_ip6_checkbasic(struct mbuf **mp);
319 # endif /* INET6 */
320
321 struct bridge_control {
322 int (*bc_func)(struct bridge_softc *, void *);
323 int bc_argsize;
324 int bc_flags;
325 };
326
327 #define BC_F_COPYIN 0x01 /* copy arguments in */
328 #define BC_F_COPYOUT 0x02 /* copy arguments out */
329 #define BC_F_SUSER 0x04 /* do super-user check */
330 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
331 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
332
333 static const struct bridge_control bridge_control_table[] = {
334 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
335 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
336
337 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
338 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
339
340 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
341 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
342
343 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
344 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
345
346 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
347
348 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
349 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
350
351 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
352
353 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
354
355 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
356 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
357
358 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
359 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
360
361 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
362 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
363
364 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
365 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
366
367 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
368
369 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
370
371 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
372 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
373
374 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
375 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
376 };
377
378 static const int bridge_control_table_size = __arraycount(bridge_control_table);
379
380 static struct if_clone bridge_cloner =
381 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
382
383 /*
384 * bridgeattach:
385 *
386 * Pseudo-device attach routine.
387 */
388 void
389 bridgeattach(int n)
390 {
391
392 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
393 0, 0, 0, "brtpl", NULL, IPL_NET);
394
395 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
396
397 if_clone_attach(&bridge_cloner);
398 }
399
400 /*
401 * bridge_clone_create:
402 *
403 * Create a new bridge instance.
404 */
405 static int
406 bridge_clone_create(struct if_clone *ifc, int unit)
407 {
408 struct bridge_softc *sc;
409 struct ifnet *ifp;
410 int error;
411
412 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
413 ifp = &sc->sc_if;
414
415 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
416 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
417 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
418 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
419 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
420 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
421 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
422 sc->sc_filter_flags = 0;
423
424 /* Initialize our routing table. */
425 bridge_rtable_init(sc);
426
427 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
428 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
429 if (error)
430 panic("%s: workqueue_create %d\n", __func__, error);
431
432 callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
433 callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
434
435 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
436 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
437 sc->sc_iflist_psref.bip_psz = pserialize_create();
438
439 if_initname(ifp, ifc->ifc_name, unit);
440 ifp->if_softc = sc;
441 #ifdef NET_MPSAFE
442 ifp->if_extflags = IFEF_MPSAFE;
443 #endif
444 ifp->if_mtu = ETHERMTU;
445 ifp->if_ioctl = bridge_ioctl;
446 ifp->if_output = bridge_output;
447 ifp->if_start = bridge_start;
448 ifp->if_stop = bridge_stop;
449 ifp->if_init = bridge_init;
450 ifp->if_type = IFT_BRIDGE;
451 ifp->if_addrlen = 0;
452 ifp->if_dlt = DLT_EN10MB;
453 ifp->if_hdrlen = ETHER_HDR_LEN;
454
455 error = if_initialize(ifp);
456 if (error != 0) {
457 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
458 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
459 callout_destroy(&sc->sc_brcallout);
460 callout_destroy(&sc->sc_bstpcallout);
461 workqueue_destroy(sc->sc_rtage_wq);
462 bridge_rtable_fini(sc);
463 kmem_free(sc, sizeof(*sc));
464
465 return error;
466 }
467
468 /*
469 * Set the link state to down.
470 * When interfaces are added the link state will reflect
471 * the best link state of the combined interfaces.
472 */
473 ifp->if_link_state = LINK_STATE_DOWN;
474
475 if_alloc_sadl(ifp);
476 if_register(ifp);
477
478 return 0;
479 }
480
481 /*
482 * bridge_clone_destroy:
483 *
484 * Destroy a bridge instance.
485 */
486 static int
487 bridge_clone_destroy(struct ifnet *ifp)
488 {
489 struct bridge_softc *sc = ifp->if_softc;
490 struct bridge_iflist *bif;
491
492 if ((ifp->if_flags & IFF_RUNNING) != 0)
493 bridge_stop(ifp, 1);
494
495 BRIDGE_LOCK(sc);
496 for (;;) {
497 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
498 bif_next);
499 if (bif == NULL)
500 break;
501 bridge_delete_member(sc, bif);
502 }
503 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
504 BRIDGE_UNLOCK(sc);
505
506 if_detach(ifp);
507
508 /* Tear down the routing table. */
509 bridge_rtable_fini(sc);
510
511 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
512 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
513 callout_destroy(&sc->sc_brcallout);
514 callout_destroy(&sc->sc_bstpcallout);
515 workqueue_destroy(sc->sc_rtage_wq);
516 kmem_free(sc, sizeof(*sc));
517
518 return 0;
519 }
520
521 /*
522 * bridge_ioctl:
523 *
524 * Handle a control request from the operator.
525 */
526 static int
527 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
528 {
529 struct bridge_softc *sc = ifp->if_softc;
530 struct lwp *l = curlwp; /* XXX */
531 union {
532 struct ifbreq ifbreq;
533 struct ifbifconf ifbifconf;
534 struct ifbareq ifbareq;
535 struct ifbaconf ifbaconf;
536 struct ifbrparam ifbrparam;
537 } args;
538 struct ifdrv *ifd = (struct ifdrv *) data;
539 const struct bridge_control *bc = NULL; /* XXXGCC */
540 int s, error = 0;
541
542 /* Authorize command before calling splsoftnet(). */
543 switch (cmd) {
544 case SIOCGDRVSPEC:
545 case SIOCSDRVSPEC:
546 if (ifd->ifd_cmd >= bridge_control_table_size
547 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
548 error = EINVAL;
549 return error;
550 }
551
552 /* We only care about BC_F_SUSER at this point. */
553 if ((bc->bc_flags & BC_F_SUSER) == 0)
554 break;
555
556 error = kauth_authorize_network(l->l_cred,
557 KAUTH_NETWORK_INTERFACE_BRIDGE,
558 cmd == SIOCGDRVSPEC ?
559 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
560 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
561 ifd, NULL, NULL);
562 if (error)
563 return error;
564
565 break;
566 }
567
568 s = splsoftnet();
569
570 switch (cmd) {
571 case SIOCGDRVSPEC:
572 case SIOCSDRVSPEC:
573 KASSERT(bc != NULL);
574 if (cmd == SIOCGDRVSPEC &&
575 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
576 error = EINVAL;
577 break;
578 }
579 else if (cmd == SIOCSDRVSPEC &&
580 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
581 error = EINVAL;
582 break;
583 }
584
585 /* BC_F_SUSER is checked above, before splsoftnet(). */
586
587 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
588 && (ifd->ifd_len != bc->bc_argsize
589 || ifd->ifd_len > sizeof(args))) {
590 error = EINVAL;
591 break;
592 }
593
594 memset(&args, 0, sizeof(args));
595 if (bc->bc_flags & BC_F_COPYIN) {
596 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
597 if (error)
598 break;
599 } else if (bc->bc_flags & BC_F_XLATEIN) {
600 args.ifbifconf.ifbic_len = ifd->ifd_len;
601 args.ifbifconf.ifbic_buf = ifd->ifd_data;
602 }
603
604 error = (*bc->bc_func)(sc, &args);
605 if (error)
606 break;
607
608 if (bc->bc_flags & BC_F_COPYOUT) {
609 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
610 } else if (bc->bc_flags & BC_F_XLATEOUT) {
611 ifd->ifd_len = args.ifbifconf.ifbic_len;
612 ifd->ifd_data = args.ifbifconf.ifbic_buf;
613 }
614 break;
615
616 case SIOCSIFFLAGS:
617 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
618 break;
619 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
620 case IFF_RUNNING:
621 /*
622 * If interface is marked down and it is running,
623 * then stop and disable it.
624 */
625 (*ifp->if_stop)(ifp, 1);
626 break;
627 case IFF_UP:
628 /*
629 * If interface is marked up and it is stopped, then
630 * start it.
631 */
632 error = (*ifp->if_init)(ifp);
633 break;
634 default:
635 break;
636 }
637 break;
638
639 case SIOCSIFMTU:
640 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
641 error = 0;
642 break;
643
644 case SIOCGIFCAP:
645 {
646 struct ifcapreq *ifcr = (struct ifcapreq *)data;
647 ifcr->ifcr_capabilities = sc->sc_capenable;
648 ifcr->ifcr_capenable = sc->sc_capenable;
649 break;
650 }
651
652 default:
653 error = ifioctl_common(ifp, cmd, data);
654 break;
655 }
656
657 splx(s);
658
659 return error;
660 }
661
662 /*
663 * bridge_lookup_member:
664 *
665 * Lookup a bridge member interface.
666 */
667 static struct bridge_iflist *
668 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
669 {
670 struct bridge_iflist *bif;
671 struct ifnet *ifp;
672 int s;
673
674 BRIDGE_PSZ_RENTER(s);
675
676 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
677 ifp = bif->bif_ifp;
678 if (strcmp(ifp->if_xname, name) == 0)
679 break;
680 }
681 if (bif != NULL)
682 bridge_acquire_member(sc, bif, psref);
683
684 BRIDGE_PSZ_REXIT(s);
685
686 return bif;
687 }
688
689 /*
690 * bridge_lookup_member_if:
691 *
692 * Lookup a bridge member interface by ifnet*.
693 */
694 static struct bridge_iflist *
695 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
696 struct psref *psref)
697 {
698 struct bridge_iflist *bif;
699 int s;
700
701 BRIDGE_PSZ_RENTER(s);
702
703 bif = member_ifp->if_bridgeif;
704 if (bif != NULL) {
705 psref_acquire(psref, &bif->bif_psref,
706 bridge_psref_class);
707 }
708
709 BRIDGE_PSZ_REXIT(s);
710
711 return bif;
712 }
713
714 static void
715 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
716 struct psref *psref)
717 {
718
719 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
720 }
721
722 /*
723 * bridge_release_member:
724 *
725 * Release the specified member interface.
726 */
727 static void
728 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
729 struct psref *psref)
730 {
731
732 psref_release(psref, &bif->bif_psref, bridge_psref_class);
733 }
734
735 /*
736 * bridge_delete_member:
737 *
738 * Delete the specified member interface.
739 */
740 static void
741 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
742 {
743 struct ifnet *ifs = bif->bif_ifp;
744
745 KASSERT(BRIDGE_LOCKED(sc));
746
747 ifs->_if_input = ether_input;
748 ifs->if_bridge = NULL;
749 ifs->if_bridgeif = NULL;
750
751 PSLIST_WRITER_REMOVE(bif, bif_next);
752 BRIDGE_PSZ_PERFORM(sc);
753 BRIDGE_UNLOCK(sc);
754
755 switch (ifs->if_type) {
756 case IFT_ETHER:
757 case IFT_L2TP:
758 /*
759 * Take the interface out of promiscuous mode.
760 * Don't call it with holding a spin lock.
761 */
762 (void) ifpromisc(ifs, 0);
763 IFNET_LOCK(ifs);
764 (void) ether_disable_vlan_mtu(ifs);
765 IFNET_UNLOCK(ifs);
766 break;
767 default:
768 #ifdef DIAGNOSTIC
769 panic("%s: impossible", __func__);
770 #endif
771 break;
772 }
773
774 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
775
776 PSLIST_ENTRY_DESTROY(bif, bif_next);
777 kmem_free(bif, sizeof(*bif));
778
779 BRIDGE_LOCK(sc);
780 }
781
782 /*
783 * bridge_calc_csum_flags:
784 *
785 * Calculate logical and b/w csum flags each member interface supports.
786 */
787 void
788 bridge_calc_csum_flags(struct bridge_softc *sc)
789 {
790 struct bridge_iflist *bif;
791 struct ifnet *ifs = NULL;
792 int flags = ~0;
793 int capenable = ~0;
794
795 BRIDGE_LOCK(sc);
796 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
797 ifs = bif->bif_ifp;
798 flags &= ifs->if_csum_flags_tx;
799 capenable &= ifs->if_capenable;
800 }
801 sc->sc_csum_flags_tx = flags;
802 sc->sc_capenable = (ifs != NULL) ? capenable : 0;
803 BRIDGE_UNLOCK(sc);
804 }
805
806 /*
807 * bridge_calc_link_state:
808 *
809 * Calculate the link state based on each member interface.
810 */
811 void
812 bridge_calc_link_state(struct bridge_softc *sc)
813 {
814 struct bridge_iflist *bif;
815 struct ifnet *ifs;
816 int link_state = LINK_STATE_DOWN;
817
818 BRIDGE_LOCK(sc);
819 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
820 ifs = bif->bif_ifp;
821 if (ifs->if_link_state == LINK_STATE_UP) {
822 link_state = LINK_STATE_UP;
823 break;
824 }
825 if (ifs->if_link_state == LINK_STATE_UNKNOWN)
826 link_state = LINK_STATE_UNKNOWN;
827 }
828 if_link_state_change(&sc->sc_if, link_state);
829 BRIDGE_UNLOCK(sc);
830 }
831
832 static int
833 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
834 {
835 struct ifbreq *req = arg;
836 struct bridge_iflist *bif = NULL;
837 struct ifnet *ifs;
838 int error = 0;
839 struct psref psref;
840
841 ifs = if_get(req->ifbr_ifsname, &psref);
842 if (ifs == NULL)
843 return ENOENT;
844
845 if (ifs->if_bridge == sc) {
846 error = EEXIST;
847 goto out;
848 }
849
850 if (ifs->if_bridge != NULL) {
851 error = EBUSY;
852 goto out;
853 }
854
855 if (ifs->_if_input != ether_input) {
856 error = EINVAL;
857 goto out;
858 }
859
860 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
861 if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
862 error = EINVAL;
863 goto out;
864 }
865
866 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
867
868 switch (ifs->if_type) {
869 case IFT_ETHER:
870 if (sc->sc_if.if_mtu != ifs->if_mtu) {
871 /* Change MTU of added interface to bridge MTU */
872 struct ifreq ifr;
873 memset(&ifr, 0, sizeof(ifr));
874 ifr.ifr_mtu = sc->sc_if.if_mtu;
875 IFNET_LOCK(ifs);
876 error = ether_ioctl(ifs, SIOCSIFMTU, &ifr);
877 IFNET_UNLOCK(ifs);
878 if (error != 0)
879 goto out;
880 }
881 /* FALLTHROUGH */
882 case IFT_L2TP:
883 IFNET_LOCK(ifs);
884 error = ether_enable_vlan_mtu(ifs);
885 IFNET_UNLOCK(ifs);
886 if (error > 0)
887 goto out;
888 /*
889 * Place the interface into promiscuous mode.
890 */
891 error = ifpromisc(ifs, 1);
892 if (error)
893 goto out;
894 break;
895 default:
896 error = EINVAL;
897 goto out;
898 }
899
900 bif->bif_ifp = ifs;
901 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
902 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
903 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
904 PSLIST_ENTRY_INIT(bif, bif_next);
905 psref_target_init(&bif->bif_psref, bridge_psref_class);
906
907 BRIDGE_LOCK(sc);
908
909 ifs->if_bridge = sc;
910 ifs->if_bridgeif = bif;
911 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
912 ifs->_if_input = bridge_input;
913
914 BRIDGE_UNLOCK(sc);
915
916 bridge_calc_csum_flags(sc);
917 bridge_calc_link_state(sc);
918
919 if (sc->sc_if.if_flags & IFF_RUNNING)
920 bstp_initialization(sc);
921 else
922 bstp_stop(sc);
923
924 out:
925 if_put(ifs, &psref);
926 if (error) {
927 if (bif != NULL)
928 kmem_free(bif, sizeof(*bif));
929 }
930 return error;
931 }
932
933 static int
934 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
935 {
936 struct ifbreq *req = arg;
937 const char *name = req->ifbr_ifsname;
938 struct bridge_iflist *bif;
939 struct ifnet *ifs;
940
941 BRIDGE_LOCK(sc);
942
943 /*
944 * Don't use bridge_lookup_member. We want to get a member
945 * with bif_refs == 0.
946 */
947 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
948 ifs = bif->bif_ifp;
949 if (strcmp(ifs->if_xname, name) == 0)
950 break;
951 }
952
953 if (bif == NULL) {
954 BRIDGE_UNLOCK(sc);
955 return ENOENT;
956 }
957
958 bridge_delete_member(sc, bif);
959
960 BRIDGE_UNLOCK(sc);
961
962 bridge_rtdelete(sc, ifs);
963 bridge_calc_csum_flags(sc);
964 bridge_calc_link_state(sc);
965
966 if (sc->sc_if.if_flags & IFF_RUNNING)
967 bstp_initialization(sc);
968
969 return 0;
970 }
971
972 static int
973 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
974 {
975 struct ifbreq *req = arg;
976 struct bridge_iflist *bif;
977 struct psref psref;
978
979 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
980 if (bif == NULL)
981 return ENOENT;
982
983 req->ifbr_ifsflags = bif->bif_flags;
984 req->ifbr_state = bif->bif_state;
985 req->ifbr_priority = bif->bif_priority;
986 req->ifbr_path_cost = bif->bif_path_cost;
987 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
988
989 bridge_release_member(sc, bif, &psref);
990
991 return 0;
992 }
993
994 static int
995 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
996 {
997 struct ifbreq *req = arg;
998 struct bridge_iflist *bif;
999 struct psref psref;
1000
1001 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1002 if (bif == NULL)
1003 return ENOENT;
1004
1005 if (req->ifbr_ifsflags & IFBIF_STP) {
1006 switch (bif->bif_ifp->if_type) {
1007 case IFT_ETHER:
1008 case IFT_L2TP:
1009 /* These can do spanning tree. */
1010 break;
1011
1012 default:
1013 /* Nothing else can. */
1014 bridge_release_member(sc, bif, &psref);
1015 return EINVAL;
1016 }
1017 }
1018
1019 bif->bif_flags = req->ifbr_ifsflags;
1020
1021 bridge_release_member(sc, bif, &psref);
1022
1023 if (sc->sc_if.if_flags & IFF_RUNNING)
1024 bstp_initialization(sc);
1025
1026 return 0;
1027 }
1028
1029 static int
1030 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1031 {
1032 struct ifbrparam *param = arg;
1033
1034 sc->sc_brtmax = param->ifbrp_csize;
1035 bridge_rttrim(sc);
1036
1037 return 0;
1038 }
1039
1040 static int
1041 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1042 {
1043 struct ifbrparam *param = arg;
1044
1045 param->ifbrp_csize = sc->sc_brtmax;
1046
1047 return 0;
1048 }
1049
1050 static int
1051 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1052 {
1053 struct ifbifconf *bifc = arg;
1054 struct bridge_iflist *bif;
1055 struct ifbreq *breqs;
1056 int i, count, error = 0;
1057
1058 retry:
1059 BRIDGE_LOCK(sc);
1060 count = 0;
1061 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1062 count++;
1063 BRIDGE_UNLOCK(sc);
1064
1065 if (count == 0) {
1066 bifc->ifbic_len = 0;
1067 return 0;
1068 }
1069
1070 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1071 /* Tell that a larger buffer is needed */
1072 bifc->ifbic_len = sizeof(*breqs) * count;
1073 return 0;
1074 }
1075
1076 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1077
1078 BRIDGE_LOCK(sc);
1079
1080 i = 0;
1081 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1082 i++;
1083 if (i > count) {
1084 /*
1085 * The number of members has been increased.
1086 * We need more memory!
1087 */
1088 BRIDGE_UNLOCK(sc);
1089 kmem_free(breqs, sizeof(*breqs) * count);
1090 goto retry;
1091 }
1092
1093 i = 0;
1094 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1095 struct ifbreq *breq = &breqs[i++];
1096 memset(breq, 0, sizeof(*breq));
1097
1098 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1099 sizeof(breq->ifbr_ifsname));
1100 breq->ifbr_ifsflags = bif->bif_flags;
1101 breq->ifbr_state = bif->bif_state;
1102 breq->ifbr_priority = bif->bif_priority;
1103 breq->ifbr_path_cost = bif->bif_path_cost;
1104 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1105 }
1106
1107 /* Don't call copyout with holding the mutex */
1108 BRIDGE_UNLOCK(sc);
1109
1110 for (i = 0; i < count; i++) {
1111 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1112 if (error)
1113 break;
1114 }
1115 bifc->ifbic_len = sizeof(*breqs) * i;
1116
1117 kmem_free(breqs, sizeof(*breqs) * count);
1118
1119 return error;
1120 }
1121
1122 static int
1123 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1124 {
1125 struct ifbaconf *bac = arg;
1126 struct bridge_rtnode *brt;
1127 struct ifbareq bareq;
1128 int count = 0, error = 0, len;
1129
1130 if (bac->ifbac_len == 0)
1131 return 0;
1132
1133 BRIDGE_RT_LOCK(sc);
1134
1135 /* The passed buffer is not enough, tell a required size. */
1136 if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1137 count = sc->sc_brtcnt;
1138 goto out;
1139 }
1140
1141 len = bac->ifbac_len;
1142 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1143 if (len < sizeof(bareq))
1144 goto out;
1145 memset(&bareq, 0, sizeof(bareq));
1146 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1147 sizeof(bareq.ifba_ifsname));
1148 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1149 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1150 bareq.ifba_expire = brt->brt_expire - time_uptime;
1151 } else
1152 bareq.ifba_expire = 0;
1153 bareq.ifba_flags = brt->brt_flags;
1154
1155 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1156 if (error)
1157 goto out;
1158 count++;
1159 len -= sizeof(bareq);
1160 }
1161 out:
1162 BRIDGE_RT_UNLOCK(sc);
1163
1164 bac->ifbac_len = sizeof(bareq) * count;
1165 return error;
1166 }
1167
1168 static int
1169 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1170 {
1171 struct ifbareq *req = arg;
1172 struct bridge_iflist *bif;
1173 int error;
1174 struct psref psref;
1175
1176 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1177 if (bif == NULL)
1178 return ENOENT;
1179
1180 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1181 req->ifba_flags);
1182
1183 bridge_release_member(sc, bif, &psref);
1184
1185 return error;
1186 }
1187
1188 static int
1189 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1190 {
1191 struct ifbrparam *param = arg;
1192
1193 sc->sc_brttimeout = param->ifbrp_ctime;
1194
1195 return 0;
1196 }
1197
1198 static int
1199 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1200 {
1201 struct ifbrparam *param = arg;
1202
1203 param->ifbrp_ctime = sc->sc_brttimeout;
1204
1205 return 0;
1206 }
1207
1208 static int
1209 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1210 {
1211 struct ifbareq *req = arg;
1212
1213 return (bridge_rtdaddr(sc, req->ifba_dst));
1214 }
1215
1216 static int
1217 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1218 {
1219 struct ifbreq *req = arg;
1220
1221 bridge_rtflush(sc, req->ifbr_ifsflags);
1222
1223 return 0;
1224 }
1225
1226 static int
1227 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1228 {
1229 struct ifbrparam *param = arg;
1230
1231 param->ifbrp_prio = sc->sc_bridge_priority;
1232
1233 return 0;
1234 }
1235
1236 static int
1237 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1238 {
1239 struct ifbrparam *param = arg;
1240
1241 sc->sc_bridge_priority = param->ifbrp_prio;
1242
1243 if (sc->sc_if.if_flags & IFF_RUNNING)
1244 bstp_initialization(sc);
1245
1246 return 0;
1247 }
1248
1249 static int
1250 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1251 {
1252 struct ifbrparam *param = arg;
1253
1254 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1255
1256 return 0;
1257 }
1258
1259 static int
1260 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1261 {
1262 struct ifbrparam *param = arg;
1263
1264 if (param->ifbrp_hellotime == 0)
1265 return EINVAL;
1266 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1267
1268 if (sc->sc_if.if_flags & IFF_RUNNING)
1269 bstp_initialization(sc);
1270
1271 return 0;
1272 }
1273
1274 static int
1275 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1276 {
1277 struct ifbrparam *param = arg;
1278
1279 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1280
1281 return 0;
1282 }
1283
1284 static int
1285 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1286 {
1287 struct ifbrparam *param = arg;
1288
1289 if (param->ifbrp_fwddelay == 0)
1290 return EINVAL;
1291 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1292
1293 if (sc->sc_if.if_flags & IFF_RUNNING)
1294 bstp_initialization(sc);
1295
1296 return 0;
1297 }
1298
1299 static int
1300 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1301 {
1302 struct ifbrparam *param = arg;
1303
1304 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1305
1306 return 0;
1307 }
1308
1309 static int
1310 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1311 {
1312 struct ifbrparam *param = arg;
1313
1314 if (param->ifbrp_maxage == 0)
1315 return EINVAL;
1316 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1317
1318 if (sc->sc_if.if_flags & IFF_RUNNING)
1319 bstp_initialization(sc);
1320
1321 return 0;
1322 }
1323
1324 static int
1325 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1326 {
1327 struct ifbreq *req = arg;
1328 struct bridge_iflist *bif;
1329 struct psref psref;
1330
1331 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1332 if (bif == NULL)
1333 return ENOENT;
1334
1335 bif->bif_priority = req->ifbr_priority;
1336
1337 if (sc->sc_if.if_flags & IFF_RUNNING)
1338 bstp_initialization(sc);
1339
1340 bridge_release_member(sc, bif, &psref);
1341
1342 return 0;
1343 }
1344
1345 static int
1346 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1347 {
1348 struct ifbrparam *param = arg;
1349
1350 param->ifbrp_filter = sc->sc_filter_flags;
1351
1352 return 0;
1353 }
1354
1355 static int
1356 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1357 {
1358 struct ifbrparam *param = arg;
1359 uint32_t nflags, oflags;
1360
1361 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1362 return EINVAL;
1363
1364 nflags = param->ifbrp_filter;
1365 oflags = sc->sc_filter_flags;
1366
1367 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1368 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1369 sc->sc_if.if_pfil);
1370 }
1371 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1372 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1373 sc->sc_if.if_pfil);
1374 }
1375
1376 sc->sc_filter_flags = nflags;
1377
1378 return 0;
1379 }
1380
1381 static int
1382 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1383 {
1384 struct ifbreq *req = arg;
1385 struct bridge_iflist *bif;
1386 struct psref psref;
1387
1388 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1389 if (bif == NULL)
1390 return ENOENT;
1391
1392 bif->bif_path_cost = req->ifbr_path_cost;
1393
1394 if (sc->sc_if.if_flags & IFF_RUNNING)
1395 bstp_initialization(sc);
1396
1397 bridge_release_member(sc, bif, &psref);
1398
1399 return 0;
1400 }
1401
1402 /*
1403 * bridge_ifdetach:
1404 *
1405 * Detach an interface from a bridge. Called when a member
1406 * interface is detaching.
1407 */
1408 void
1409 bridge_ifdetach(struct ifnet *ifp)
1410 {
1411 struct bridge_softc *sc = ifp->if_bridge;
1412 struct ifbreq breq;
1413
1414 /* ioctl_lock should prevent this from happening */
1415 KASSERT(sc != NULL);
1416
1417 memset(&breq, 0, sizeof(breq));
1418 strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1419
1420 (void) bridge_ioctl_del(sc, &breq);
1421 }
1422
1423 /*
1424 * bridge_init:
1425 *
1426 * Initialize a bridge interface.
1427 */
1428 static int
1429 bridge_init(struct ifnet *ifp)
1430 {
1431 struct bridge_softc *sc = ifp->if_softc;
1432
1433 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1434
1435 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1436 bridge_timer, sc);
1437 bstp_initialization(sc);
1438
1439 ifp->if_flags |= IFF_RUNNING;
1440 return 0;
1441 }
1442
1443 /*
1444 * bridge_stop:
1445 *
1446 * Stop the bridge interface.
1447 */
1448 static void
1449 bridge_stop(struct ifnet *ifp, int disable)
1450 {
1451 struct bridge_softc *sc = ifp->if_softc;
1452
1453 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1454 ifp->if_flags &= ~IFF_RUNNING;
1455
1456 callout_halt(&sc->sc_brcallout, NULL);
1457 workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1458 bstp_stop(sc);
1459 bridge_rtflush(sc, IFBF_FLUSHDYN);
1460 }
1461
1462 /*
1463 * bridge_enqueue:
1464 *
1465 * Enqueue a packet on a bridge member interface.
1466 */
1467 void
1468 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1469 int runfilt)
1470 {
1471 int len, error;
1472 short mflags;
1473
1474 if (runfilt) {
1475 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1476 dst_ifp, PFIL_OUT) != 0) {
1477 if (m != NULL)
1478 m_freem(m);
1479 return;
1480 }
1481 if (m == NULL)
1482 return;
1483 }
1484
1485 #ifdef ALTQ
1486 KERNEL_LOCK(1, NULL);
1487 /*
1488 * If ALTQ is enabled on the member interface, do
1489 * classification; the queueing discipline might
1490 * not require classification, but might require
1491 * the address family/header pointer in the pktattr.
1492 */
1493 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1494 /* XXX IFT_ETHER */
1495 altq_etherclassify(&dst_ifp->if_snd, m);
1496 }
1497 KERNEL_UNLOCK_ONE(NULL);
1498 #endif /* ALTQ */
1499
1500 len = m->m_pkthdr.len;
1501 mflags = m->m_flags;
1502
1503 error = if_transmit_lock(dst_ifp, m);
1504 if (error) {
1505 /* mbuf is already freed */
1506 if_statinc(&sc->sc_if, if_oerrors);
1507 return;
1508 }
1509
1510 net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1511 if_statinc_ref(nsr, if_opackets);
1512 if_statadd_ref(nsr, if_obytes, len);
1513 if (mflags & M_MCAST)
1514 if_statinc_ref(nsr, if_omcasts);
1515 IF_STAT_PUTREF(&sc->sc_if);
1516 }
1517
1518 /*
1519 * bridge_output:
1520 *
1521 * Send output from a bridge member interface. This
1522 * performs the bridging function for locally originated
1523 * packets.
1524 *
1525 * The mbuf has the Ethernet header already attached. We must
1526 * enqueue or free the mbuf before returning.
1527 */
1528 int
1529 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1530 const struct rtentry *rt)
1531 {
1532 struct ether_header *eh;
1533 struct ifnet *dst_if;
1534 struct bridge_softc *sc;
1535 struct mbuf *n;
1536 int s;
1537
1538 /*
1539 * bridge_output() is called from ether_output(), furthermore
1540 * ifp argument doesn't point to bridge(4). So, don't assert
1541 * IFEF_MPSAFE here.
1542 */
1543
1544 KASSERT(m->m_len >= ETHER_HDR_LEN);
1545
1546 eh = mtod(m, struct ether_header *);
1547 sc = ifp->if_bridge;
1548
1549 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1550 if (memcmp(etherbroadcastaddr,
1551 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1552 m->m_flags |= M_BCAST;
1553 else
1554 m->m_flags |= M_MCAST;
1555 }
1556
1557 /*
1558 * If bridge is down, but the original output interface is up,
1559 * go ahead and send out that interface. Otherwise, the packet
1560 * is dropped below.
1561 */
1562 if (__predict_false(sc == NULL) ||
1563 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1564 dst_if = ifp;
1565 goto unicast_asis;
1566 }
1567
1568 /*
1569 * If the packet is a multicast, or we don't know a better way to
1570 * get there, send to all interfaces.
1571 */
1572 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1573 dst_if = NULL;
1574 else
1575 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1576
1577 /*
1578 * In general, we need to handle TX offload in software before
1579 * enqueueing a packet. However, we can send it as is in the
1580 * cases of unicast via (1) the source interface, or (2) an
1581 * interface which supports the specified offload options.
1582 * For multicast or broadcast, send it as is only if (3) all
1583 * the member interfaces support the specified options.
1584 */
1585
1586 /*
1587 * Unicast via the source interface.
1588 */
1589 if (dst_if == ifp)
1590 goto unicast_asis;
1591
1592 /*
1593 * Unicast via other interface.
1594 */
1595 if (dst_if != NULL) {
1596 KASSERT(m->m_flags & M_PKTHDR);
1597 if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1598 m->m_pkthdr.csum_flags)) {
1599 /*
1600 * Unicast via an interface which supports the
1601 * specified offload options.
1602 */
1603 goto unicast_asis;
1604 }
1605
1606 /*
1607 * Handle TX offload in software. For TSO, a packet is
1608 * split into multiple chunks. Thus, the return value of
1609 * ether_sw_offload_tx() is mbuf queue consists of them.
1610 */
1611 m = ether_sw_offload_tx(ifp, m);
1612 if (m == NULL)
1613 return 0;
1614
1615 do {
1616 n = m->m_nextpkt;
1617 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1618 m_freem(m);
1619 else
1620 bridge_enqueue(sc, dst_if, m, 0);
1621 m = n;
1622 } while (m != NULL);
1623
1624 return 0;
1625 }
1626
1627 /*
1628 * Multicast or broadcast.
1629 */
1630 if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1631 m->m_pkthdr.csum_flags)) {
1632 /*
1633 * Specified TX offload options are supported by all
1634 * the member interfaces of this bridge.
1635 */
1636 m->m_nextpkt = NULL; /* XXX */
1637 } else {
1638 /*
1639 * Otherwise, handle TX offload in software.
1640 */
1641 m = ether_sw_offload_tx(ifp, m);
1642 if (m == NULL)
1643 return 0;
1644 }
1645
1646 do {
1647 /* XXX Should call bridge_broadcast, but there are locking
1648 * issues which need resolving first. */
1649 struct bridge_iflist *bif;
1650 struct mbuf *mc;
1651 bool used = false;
1652
1653 n = m->m_nextpkt;
1654
1655 BRIDGE_PSZ_RENTER(s);
1656 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1657 struct psref psref;
1658
1659 bridge_acquire_member(sc, bif, &psref);
1660 BRIDGE_PSZ_REXIT(s);
1661
1662 dst_if = bif->bif_ifp;
1663 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1664 goto next;
1665
1666 /*
1667 * If this is not the original output interface,
1668 * and the interface is participating in spanning
1669 * tree, make sure the port is in a state that
1670 * allows forwarding.
1671 */
1672 if (dst_if != ifp &&
1673 (bif->bif_flags & IFBIF_STP) != 0) {
1674 switch (bif->bif_state) {
1675 case BSTP_IFSTATE_BLOCKING:
1676 case BSTP_IFSTATE_LISTENING:
1677 case BSTP_IFSTATE_DISABLED:
1678 goto next;
1679 }
1680 }
1681
1682 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1683 bif_next) == NULL &&
1684 ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1685 dst_if == ifp))
1686 {
1687 used = true;
1688 mc = m;
1689 } else {
1690 mc = m_copypacket(m, M_DONTWAIT);
1691 if (mc == NULL) {
1692 if_statinc(&sc->sc_if, if_oerrors);
1693 goto next;
1694 }
1695 }
1696
1697 bridge_enqueue(sc, dst_if, mc, 0);
1698
1699 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1700 dst_if != ifp)
1701 {
1702 if (PSLIST_READER_NEXT(bif,
1703 struct bridge_iflist, bif_next) == NULL)
1704 {
1705 used = true;
1706 mc = m;
1707 } else {
1708 mc = m_copypacket(m, M_DONTWAIT);
1709 if (mc == NULL) {
1710 if_statinc(&sc->sc_if,
1711 if_oerrors);
1712 goto next;
1713 }
1714 }
1715
1716 m_set_rcvif(mc, dst_if);
1717 mc->m_flags &= ~M_PROMISC;
1718
1719 s = splsoftnet();
1720 KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1721 ether_input(dst_if, mc);
1722 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1723 splx(s);
1724 }
1725
1726 next:
1727 BRIDGE_PSZ_RENTER(s);
1728 bridge_release_member(sc, bif, &psref);
1729
1730 /* Guarantee we don't re-enter the loop as we already
1731 * decided we're at the end. */
1732 if (used)
1733 break;
1734 }
1735 BRIDGE_PSZ_REXIT(s);
1736
1737 if (!used)
1738 m_freem(m);
1739
1740 m = n;
1741 } while (m != NULL);
1742 return 0;
1743
1744 unicast_asis:
1745 /*
1746 * XXX Spanning tree consideration here?
1747 */
1748 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1749 m_freem(m);
1750 else
1751 bridge_enqueue(sc, dst_if, m, 0);
1752 return 0;
1753 }
1754
1755 /*
1756 * bridge_start:
1757 *
1758 * Start output on a bridge.
1759 *
1760 * NOTE: This routine should never be called in this implementation.
1761 */
1762 static void
1763 bridge_start(struct ifnet *ifp)
1764 {
1765
1766 printf("%s: bridge_start() called\n", ifp->if_xname);
1767 }
1768
1769 /*
1770 * bridge_forward:
1771 *
1772 * The forwarding function of the bridge.
1773 */
1774 static void
1775 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1776 {
1777 struct bridge_iflist *bif;
1778 struct ifnet *src_if, *dst_if;
1779 struct ether_header *eh;
1780 struct psref psref;
1781 struct psref psref_src;
1782 DECLARE_LOCK_VARIABLE;
1783
1784 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1785 return;
1786
1787 src_if = m_get_rcvif_psref(m, &psref_src);
1788 if (src_if == NULL) {
1789 /* Interface is being destroyed? */
1790 m_freem(m);
1791 goto out;
1792 }
1793
1794 if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1795
1796 /*
1797 * Look up the bridge_iflist.
1798 */
1799 bif = bridge_lookup_member_if(sc, src_if, &psref);
1800 if (bif == NULL) {
1801 /* Interface is not a bridge member (anymore?) */
1802 m_freem(m);
1803 goto out;
1804 }
1805
1806 if (bif->bif_flags & IFBIF_STP) {
1807 switch (bif->bif_state) {
1808 case BSTP_IFSTATE_BLOCKING:
1809 case BSTP_IFSTATE_LISTENING:
1810 case BSTP_IFSTATE_DISABLED:
1811 m_freem(m);
1812 bridge_release_member(sc, bif, &psref);
1813 goto out;
1814 }
1815 }
1816
1817 eh = mtod(m, struct ether_header *);
1818
1819 /*
1820 * If the interface is learning, and the source
1821 * address is valid and not multicast, record
1822 * the address.
1823 */
1824 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1825 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1826 (eh->ether_shost[0] == 0 &&
1827 eh->ether_shost[1] == 0 &&
1828 eh->ether_shost[2] == 0 &&
1829 eh->ether_shost[3] == 0 &&
1830 eh->ether_shost[4] == 0 &&
1831 eh->ether_shost[5] == 0) == 0) {
1832 (void) bridge_rtupdate(sc, eh->ether_shost,
1833 src_if, 0, IFBAF_DYNAMIC);
1834 }
1835
1836 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1837 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1838 m_freem(m);
1839 bridge_release_member(sc, bif, &psref);
1840 goto out;
1841 }
1842
1843 bridge_release_member(sc, bif, &psref);
1844
1845 /*
1846 * At this point, the port either doesn't participate
1847 * in spanning tree or it is in the forwarding state.
1848 */
1849
1850 /*
1851 * If the packet is unicast, destined for someone on
1852 * "this" side of the bridge, drop it.
1853 */
1854 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1855 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1856 if (src_if == dst_if) {
1857 m_freem(m);
1858 goto out;
1859 }
1860 } else {
1861 /* ...forward it to all interfaces. */
1862 if_statinc(&sc->sc_if, if_imcasts);
1863 dst_if = NULL;
1864 }
1865
1866 if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1867 if (m != NULL)
1868 m_freem(m);
1869 goto out;
1870 }
1871 if (m == NULL)
1872 goto out;
1873
1874 if (dst_if == NULL) {
1875 bridge_broadcast(sc, src_if, m);
1876 goto out;
1877 }
1878
1879 m_put_rcvif_psref(src_if, &psref_src);
1880 src_if = NULL;
1881
1882 /*
1883 * At this point, we're dealing with a unicast frame
1884 * going to a different interface.
1885 */
1886 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1887 m_freem(m);
1888 goto out;
1889 }
1890
1891 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1892 if (bif == NULL) {
1893 /* Not a member of the bridge (anymore?) */
1894 m_freem(m);
1895 goto out;
1896 }
1897
1898 if (bif->bif_flags & IFBIF_STP) {
1899 switch (bif->bif_state) {
1900 case BSTP_IFSTATE_DISABLED:
1901 case BSTP_IFSTATE_BLOCKING:
1902 m_freem(m);
1903 bridge_release_member(sc, bif, &psref);
1904 goto out;
1905 }
1906 }
1907
1908 bridge_release_member(sc, bif, &psref);
1909
1910 /*
1911 * Before enqueueing this packet to the destination interface,
1912 * clear any in-bound checksum flags to prevent them from being
1913 * misused as out-bound flags.
1914 */
1915 m->m_pkthdr.csum_flags = 0;
1916
1917 ACQUIRE_GLOBAL_LOCKS();
1918 bridge_enqueue(sc, dst_if, m, 1);
1919 RELEASE_GLOBAL_LOCKS();
1920 out:
1921 if (src_if != NULL)
1922 m_put_rcvif_psref(src_if, &psref_src);
1923 return;
1924 }
1925
1926 static bool
1927 bstp_state_before_learning(struct bridge_iflist *bif)
1928 {
1929 if (bif->bif_flags & IFBIF_STP) {
1930 switch (bif->bif_state) {
1931 case BSTP_IFSTATE_BLOCKING:
1932 case BSTP_IFSTATE_LISTENING:
1933 case BSTP_IFSTATE_DISABLED:
1934 return true;
1935 }
1936 }
1937 return false;
1938 }
1939
1940 static bool
1941 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1942 {
1943 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1944
1945 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1946 #if NCARP > 0
1947 || (bif->bif_ifp->if_carp &&
1948 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1949 #endif /* NCARP > 0 */
1950 )
1951 return true;
1952
1953 return false;
1954 }
1955
1956 /*
1957 * bridge_input:
1958 *
1959 * Receive input from a member interface. Queue the packet for
1960 * bridging if it is not for us.
1961 */
1962 static void
1963 bridge_input(struct ifnet *ifp, struct mbuf *m)
1964 {
1965 struct bridge_softc *sc = ifp->if_bridge;
1966 struct bridge_iflist *bif;
1967 struct ether_header *eh;
1968 struct psref psref;
1969 int bound;
1970 DECLARE_LOCK_VARIABLE;
1971
1972 KASSERT(!cpu_intr_p());
1973
1974 if (__predict_false(sc == NULL) ||
1975 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1976 ACQUIRE_GLOBAL_LOCKS();
1977 ether_input(ifp, m);
1978 RELEASE_GLOBAL_LOCKS();
1979 return;
1980 }
1981
1982 bound = curlwp_bind();
1983 bif = bridge_lookup_member_if(sc, ifp, &psref);
1984 if (bif == NULL) {
1985 curlwp_bindx(bound);
1986 ACQUIRE_GLOBAL_LOCKS();
1987 ether_input(ifp, m);
1988 RELEASE_GLOBAL_LOCKS();
1989 return;
1990 }
1991
1992 eh = mtod(m, struct ether_header *);
1993
1994 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1995 if (memcmp(etherbroadcastaddr,
1996 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1997 m->m_flags |= M_BCAST;
1998 else
1999 m->m_flags |= M_MCAST;
2000 }
2001
2002 /*
2003 * A 'fast' path for packets addressed to interfaces that are
2004 * part of this bridge.
2005 */
2006 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
2007 !bstp_state_before_learning(bif)) {
2008 struct bridge_iflist *_bif;
2009 struct ifnet *_ifp = NULL;
2010 int s;
2011 struct psref _psref;
2012
2013 BRIDGE_PSZ_RENTER(s);
2014 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
2015 /* It is destined for us. */
2016 if (bridge_ourether(_bif, eh, 0)) {
2017 bridge_acquire_member(sc, _bif, &_psref);
2018 BRIDGE_PSZ_REXIT(s);
2019 if (_bif->bif_flags & IFBIF_LEARNING)
2020 (void) bridge_rtupdate(sc,
2021 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2022 m_set_rcvif(m, _bif->bif_ifp);
2023 _ifp = _bif->bif_ifp;
2024 bridge_release_member(sc, _bif, &_psref);
2025 goto out;
2026 }
2027
2028 /* We just received a packet that we sent out. */
2029 if (bridge_ourether(_bif, eh, 1))
2030 break;
2031 }
2032 BRIDGE_PSZ_REXIT(s);
2033 out:
2034
2035 if (_bif != NULL) {
2036 bridge_release_member(sc, bif, &psref);
2037 curlwp_bindx(bound);
2038 if (_ifp != NULL) {
2039 m->m_flags &= ~M_PROMISC;
2040 ACQUIRE_GLOBAL_LOCKS();
2041 ether_input(_ifp, m);
2042 RELEASE_GLOBAL_LOCKS();
2043 } else
2044 m_freem(m);
2045 return;
2046 }
2047 }
2048
2049 /* Tap off 802.1D packets; they do not get forwarded. */
2050 if (bif->bif_flags & IFBIF_STP &&
2051 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2052 bstp_input(sc, bif, m);
2053 bridge_release_member(sc, bif, &psref);
2054 curlwp_bindx(bound);
2055 return;
2056 }
2057
2058 /*
2059 * A normal switch would discard the packet here, but that's not what
2060 * we've done historically. This also prevents some obnoxious behaviour.
2061 */
2062 if (bstp_state_before_learning(bif)) {
2063 bridge_release_member(sc, bif, &psref);
2064 curlwp_bindx(bound);
2065 ACQUIRE_GLOBAL_LOCKS();
2066 ether_input(ifp, m);
2067 RELEASE_GLOBAL_LOCKS();
2068 return;
2069 }
2070
2071 bridge_release_member(sc, bif, &psref);
2072
2073 bridge_forward(sc, m);
2074
2075 curlwp_bindx(bound);
2076 }
2077
2078 /*
2079 * bridge_broadcast:
2080 *
2081 * Send a frame to all interfaces that are members of
2082 * the bridge, except for the one on which the packet
2083 * arrived.
2084 */
2085 static void
2086 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2087 struct mbuf *m)
2088 {
2089 struct bridge_iflist *bif;
2090 struct mbuf *mc;
2091 struct ifnet *dst_if;
2092 bool bmcast;
2093 int s;
2094 DECLARE_LOCK_VARIABLE;
2095
2096 bmcast = m->m_flags & (M_BCAST|M_MCAST);
2097
2098 BRIDGE_PSZ_RENTER(s);
2099 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2100 struct psref psref;
2101
2102 bridge_acquire_member(sc, bif, &psref);
2103 BRIDGE_PSZ_REXIT(s);
2104
2105 dst_if = bif->bif_ifp;
2106
2107 if (bif->bif_flags & IFBIF_STP) {
2108 switch (bif->bif_state) {
2109 case BSTP_IFSTATE_BLOCKING:
2110 case BSTP_IFSTATE_DISABLED:
2111 goto next;
2112 }
2113 }
2114
2115 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2116 goto next;
2117
2118 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2119 goto next;
2120
2121 if (dst_if != src_if) {
2122 mc = m_copypacket(m, M_DONTWAIT);
2123 if (mc == NULL) {
2124 if_statinc(&sc->sc_if, if_oerrors);
2125 goto next;
2126 }
2127 /*
2128 * Before enqueueing this packet to the destination
2129 * interface, clear any in-bound checksum flags to
2130 * prevent them from being misused as out-bound flags.
2131 */
2132 mc->m_pkthdr.csum_flags = 0;
2133
2134 ACQUIRE_GLOBAL_LOCKS();
2135 bridge_enqueue(sc, dst_if, mc, 1);
2136 RELEASE_GLOBAL_LOCKS();
2137 }
2138
2139 if (bmcast) {
2140 mc = m_copypacket(m, M_DONTWAIT);
2141 if (mc == NULL) {
2142 if_statinc(&sc->sc_if, if_oerrors);
2143 goto next;
2144 }
2145 /*
2146 * Before enqueueing this packet to the destination
2147 * interface, clear any in-bound checksum flags to
2148 * prevent them from being misused as out-bound flags.
2149 */
2150 mc->m_pkthdr.csum_flags = 0;
2151
2152 m_set_rcvif(mc, dst_if);
2153 mc->m_flags &= ~M_PROMISC;
2154
2155 ACQUIRE_GLOBAL_LOCKS();
2156 ether_input(dst_if, mc);
2157 RELEASE_GLOBAL_LOCKS();
2158 }
2159 next:
2160 BRIDGE_PSZ_RENTER(s);
2161 bridge_release_member(sc, bif, &psref);
2162 }
2163 BRIDGE_PSZ_REXIT(s);
2164
2165 m_freem(m);
2166 }
2167
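/*
 * bridge_rtalloc:
 *
 *	Allocate a new forwarding entry for 'dst', insert it into the
 *	route table and return it via *brtp.  Fails with ENOSPC when the
 *	table is already at its configured maximum and with ENOMEM when
 *	allocation fails.
 */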
2168 static int
2169 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2170 struct bridge_rtnode **brtp)
2171 {
2172 struct bridge_rtnode *brt;
2173 int error;
2174
2175 if (sc->sc_brtcnt >= sc->sc_brtmax)
2176 return ENOSPC;
2177
2178 /*
2179 * Allocate a new bridge forwarding node, and
2180 * initialize the expiration time and Ethernet
2181 * address.
2182 */
2183 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2184 if (brt == NULL)
2185 return ENOMEM;
2186
2187 memset(brt, 0, sizeof(*brt));
2188 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2189 brt->brt_flags = IFBAF_DYNAMIC;
2190 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2191 PSLIST_ENTRY_INIT(brt, brt_list);
2192 PSLIST_ENTRY_INIT(brt, brt_hash);
2193
2194 BRIDGE_RT_LOCK(sc);
2195 error = bridge_rtnode_insert(sc, brt);
2196 BRIDGE_RT_UNLOCK(sc);
2197
2198 if (error != 0) {
2199 pool_put(&bridge_rtnode_pool, brt);
2200 return error;
2201 }
2202
2203 *brtp = brt;
2204 return 0;
2205 }
2206
2207 /*
2208 * bridge_rtupdate:
2209 *
2210  *	Add or update a bridge routing entry.
2211 */
2212 static int
2213 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2214 struct ifnet *dst_if, int setflags, uint8_t flags)
2215 {
2216 struct bridge_rtnode *brt;
2217 int s;
2218
2219 again:
2220 /*
2221 * A route for this destination might already exist. If so,
2222 * update it, otherwise create a new one.
2223 */
2224 BRIDGE_RT_RENTER(s);
2225 brt = bridge_rtnode_lookup(sc, dst);
2226
2227 if (brt != NULL) {
2228 brt->brt_ifp = dst_if;
2229 if (setflags) {
2230 brt->brt_flags = flags;
2231 if (flags & IFBAF_STATIC)
2232 brt->brt_expire = 0;
2233 else
2234 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2235 } else {
2236 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2237 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2238 }
2239 }
2240 BRIDGE_RT_REXIT(s);
2241
2242 if (brt == NULL) {
2243 int r;
2244
2245 r = bridge_rtalloc(sc, dst, &brt);
2246 if (r != 0)
2247 return r;
2248 goto again;
2249 }
2250
2251 return 0;
2252 }
2253
2254 /*
2255 * bridge_rtlookup:
2256 *
2257 * Lookup the destination interface for an address.
2258 */
2259 static struct ifnet *
2260 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2261 {
2262 struct bridge_rtnode *brt;
2263 struct ifnet *ifs = NULL;
2264 int s;
2265
2266 BRIDGE_RT_RENTER(s);
2267 brt = bridge_rtnode_lookup(sc, addr);
2268 if (brt != NULL)
2269 ifs = brt->brt_ifp;
2270 BRIDGE_RT_REXIT(s);
2271
2272 return ifs;
2273 }
2274
2275 typedef bool (*bridge_iterate_cb_t)
2276 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2277
2278 /*
2279 * bridge_rtlist_iterate_remove:
2280 *
2281  *	Iterate over sc->sc_rtlist and remove the rtnodes for which the
2282  *	func callback returns true.  Removals are done in a pserialize(9)
2283  *	manner; to that end, all kmem_* operations are kept outside the
2284  *	mutex.
2285 */
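/*
 * A minimal callback sketch (illustrative only; the name example_cb is
 * made up -- the real callbacks are bridge_rttrim0_cb, bridge_rtage_cb
 * and bridge_rtflush_cb below):
 *
 *	static bool
 *	example_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
 *	    bool *need_break, void *arg)
 *	{
 *
 *		return (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC;
 *	}
 *
 * Returning true asks the iterator to unlink brt and destroy it after
 * the pserialize grace period; setting *need_break stops the walk early.
 */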
2286 static void
2287 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2288 {
2289 struct bridge_rtnode *brt;
2290 struct bridge_rtnode **brt_list;
2291 int i, count;
2292
2293 retry:
2294 count = sc->sc_brtcnt;
2295 if (count == 0)
2296 return;
2297 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2298
2299 BRIDGE_RT_LOCK(sc);
2300 if (__predict_false(sc->sc_brtcnt > count)) {
2301 		/* The number of rtnodes grew; we need a larger buffer */
2302 BRIDGE_RT_UNLOCK(sc);
2303 kmem_free(brt_list, sizeof(*brt_list) * count);
2304 goto retry;
2305 }
2306
2307 i = 0;
2308 /*
2309 * We don't need to use a _SAFE variant here because we know
2310 * that a removed item keeps its next pointer as-is thanks to
2311 * pslist(9) and isn't freed in the loop.
2312 */
2313 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2314 bool need_break = false;
2315 if (func(sc, brt, &need_break, arg)) {
2316 bridge_rtnode_remove(sc, brt);
2317 brt_list[i++] = brt;
2318 }
2319 if (need_break)
2320 break;
2321 }
2322
2323 if (i > 0)
2324 BRIDGE_RT_PSZ_PERFORM(sc);
2325 BRIDGE_RT_UNLOCK(sc);
2326
2327 while (--i >= 0)
2328 bridge_rtnode_destroy(brt_list[i]);
2329
2330 kmem_free(brt_list, sizeof(*brt_list) * count);
2331 }
2332
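/*
 * Callback for bridge_rttrim0: remove dynamic entries, stopping the
 * walk as soon as the table fits within sc_brtmax again.
 */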
2333 static bool
2334 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2335 bool *need_break, void *arg)
2336 {
2337 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2338 		/* Take the subsequent removal into account */
2339 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2340 *need_break = true;
2341 return true;
2342 } else
2343 return false;
2344 }
2345
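/*
 * bridge_rttrim0:
 *
 *	Remove dynamic entries until the route table no longer exceeds
 *	its configured maximum (sc_brtmax).
 */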
2346 static void
2347 bridge_rttrim0(struct bridge_softc *sc)
2348 {
2349 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2350 }
2351
2352 /*
2353 * bridge_rttrim:
2354 *
2355  *	Trim the routing table so that we have a number
2356 * of routing entries less than or equal to the
2357 * maximum number.
2358 */
2359 static void
2360 bridge_rttrim(struct bridge_softc *sc)
2361 {
2362
2363 /* Make sure we actually need to do this. */
2364 if (sc->sc_brtcnt <= sc->sc_brtmax)
2365 return;
2366
2367 /* Force an aging cycle; this might trim enough addresses. */
2368 bridge_rtage(sc);
2369 if (sc->sc_brtcnt <= sc->sc_brtmax)
2370 return;
2371
2372 bridge_rttrim0(sc);
2373
2374 return;
2375 }
2376
2377 /*
2378 * bridge_timer:
2379 *
2380  *	Aging timer for the bridge: defer the aging cycle to a workqueue.
2381 */
2382 static void
2383 bridge_timer(void *arg)
2384 {
2385 struct bridge_softc *sc = arg;
2386
2387 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2388 }
2389
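/*
 * bridge_rtage_work:
 *
 *	Workqueue handler: perform an aging cycle and re-arm the aging
 *	callout while the bridge is running.
 */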
2390 static void
2391 bridge_rtage_work(struct work *wk, void *arg)
2392 {
2393 struct bridge_softc *sc = arg;
2394
2395 KASSERT(wk == &sc->sc_rtage_wk);
2396
2397 bridge_rtage(sc);
2398
2399 if (sc->sc_if.if_flags & IFF_RUNNING)
2400 callout_reset(&sc->sc_brcallout,
2401 bridge_rtable_prune_period * hz, bridge_timer, sc);
2402 }
2403
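/*
 * Callback for bridge_rtage: remove dynamic entries whose expiry time
 * has passed.
 */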
2404 static bool
2405 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2406 bool *need_break, void *arg)
2407 {
2408 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2409 time_uptime >= brt->brt_expire)
2410 return true;
2411 else
2412 return false;
2413 }
2414
2415 /*
2416 * bridge_rtage:
2417 *
2418 * Perform an aging cycle.
2419 */
2420 static void
2421 bridge_rtage(struct bridge_softc *sc)
2422 {
2423 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2424 }
2425
2426
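/*
 * Callback for bridge_rtflush: remove dynamic entries, or every entry
 * if a full flush was requested.
 */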
2427 static bool
2428 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2429 bool *need_break, void *arg)
2430 {
2431 int full = *(int*)arg;
2432
2433 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2434 return true;
2435 else
2436 return false;
2437 }
2438
2439 /*
2440 * bridge_rtflush:
2441 *
2442 * Remove all dynamic addresses from the bridge.
2443 */
2444 static void
2445 bridge_rtflush(struct bridge_softc *sc, int full)
2446 {
2447 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2448 }
2449
2450 /*
2451 * bridge_rtdaddr:
2452 *
2453 * Remove an address from the table.
2454 */
2455 static int
2456 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2457 {
2458 struct bridge_rtnode *brt;
2459
2460 BRIDGE_RT_LOCK(sc);
2461 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2462 BRIDGE_RT_UNLOCK(sc);
2463 return ENOENT;
2464 }
2465 bridge_rtnode_remove(sc, brt);
2466 BRIDGE_RT_PSZ_PERFORM(sc);
2467 BRIDGE_RT_UNLOCK(sc);
2468
2469 bridge_rtnode_destroy(brt);
2470
2471 return 0;
2472 }
2473
2474 /*
2475 * bridge_rtdelete:
2476 *
2477  *	Delete routes to a specific member interface.
2478 */
2479 static void
2480 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2481 {
2482 struct bridge_rtnode *brt;
2483
2484 /* XXX pserialize_perform for each entry is slow */
2485 again:
2486 BRIDGE_RT_LOCK(sc);
2487 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2488 if (brt->brt_ifp == ifp)
2489 break;
2490 }
2491 if (brt == NULL) {
2492 BRIDGE_RT_UNLOCK(sc);
2493 return;
2494 }
2495 bridge_rtnode_remove(sc, brt);
2496 BRIDGE_RT_PSZ_PERFORM(sc);
2497 BRIDGE_RT_UNLOCK(sc);
2498
2499 bridge_rtnode_destroy(brt);
2500
2501 goto again;
2502 }
2503
2504 /*
2505 * bridge_rtable_init:
2506 *
2507 * Initialize the route table for this bridge.
2508 */
2509 static void
2510 bridge_rtable_init(struct bridge_softc *sc)
2511 {
2512 int i;
2513
2514 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2515 KM_SLEEP);
2516
2517 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2518 PSLIST_INIT(&sc->sc_rthash[i]);
2519
2520 sc->sc_rthash_key = cprng_fast32();
2521
2522 PSLIST_INIT(&sc->sc_rtlist);
2523
2524 sc->sc_rtlist_psz = pserialize_create();
2525 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2526 }
2527
2528 /*
2529 * bridge_rtable_fini:
2530 *
2531 * Deconstruct the route table for this bridge.
2532 */
2533 static void
2534 bridge_rtable_fini(struct bridge_softc *sc)
2535 {
2536
2537 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2538 mutex_obj_free(sc->sc_rtlist_lock);
2539 pserialize_destroy(sc->sc_rtlist_psz);
2540 }
2541
2542 /*
2543 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2544  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2545 */
2546 #define mix(a, b, c) \
2547 do { \
2548 a -= b; a -= c; a ^= (c >> 13); \
2549 b -= c; b -= a; b ^= (a << 8); \
2550 c -= a; c -= b; c ^= (b >> 13); \
2551 a -= b; a -= c; a ^= (c >> 12); \
2552 b -= c; b -= a; b ^= (a << 16); \
2553 c -= a; c -= b; c ^= (b >> 5); \
2554 a -= b; a -= c; a ^= (c >> 3); \
2555 b -= c; b -= a; b ^= (a << 10); \
2556 c -= a; c -= b; c ^= (b >> 15); \
2557 } while (/*CONSTCOND*/0)
2558
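/*
 * bridge_rthash:
 *
 *	Hash an Ethernet address into a route-table bucket index.  The
 *	six address bytes are folded into 'a' and 'b', mixed with the
 *	per-bridge random key in 'c', and the result is masked down to
 *	BRIDGE_RTHASH_MASK.  See bridge_rtnode_lookup() and
 *	bridge_rtnode_insert() for its users.
 */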
2559 static inline uint32_t
2560 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2561 {
2562 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2563
2564 b += addr[5] << 8;
2565 b += addr[4];
2566 a += (uint32_t)addr[3] << 24;
2567 a += addr[2] << 16;
2568 a += addr[1] << 8;
2569 a += addr[0];
2570
2571 mix(a, b, c);
2572
2573 return (c & BRIDGE_RTHASH_MASK);
2574 }
2575
2576 #undef mix
2577
2578 /*
2579 * bridge_rtnode_lookup:
2580 *
2581 * Look up a bridge route node for the specified destination.
2582 */
2583 static struct bridge_rtnode *
2584 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2585 {
2586 struct bridge_rtnode *brt;
2587 uint32_t hash;
2588 int dir;
2589
2590 hash = bridge_rthash(sc, addr);
2591 BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2592 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2593 if (dir == 0)
2594 return brt;
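		/*
		 * The hash chains are kept sorted by address (see
		 * bridge_rtnode_insert()), so once we pass the slot
		 * where the address would live there can be no match.
		 */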
2595 if (dir > 0)
2596 return NULL;
2597 }
2598
2599 return NULL;
2600 }
2601
2602 /*
2603 * bridge_rtnode_insert:
2604 *
2605 * Insert the specified bridge node into the route table. We
2606 * assume the entry is not already in the table.
2607 */
2608 static int
2609 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2610 {
2611 struct bridge_rtnode *lbrt, *prev = NULL;
2612 uint32_t hash;
2613
2614 KASSERT(BRIDGE_RT_LOCKED(sc));
2615
2616 hash = bridge_rthash(sc, brt->brt_addr);
2617 BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2618 int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2619 if (dir == 0)
2620 return EEXIST;
2621 if (dir > 0)
2622 break;
2623 prev = lbrt;
2624 }
2625 if (prev == NULL)
2626 BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2627 else
2628 BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2629
2630 BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2631 sc->sc_brtcnt++;
2632
2633 return 0;
2634 }
2635
2636 /*
2637 * bridge_rtnode_remove:
2638 *
2639 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2640 */
2641 static void
2642 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2643 {
2644
2645 KASSERT(BRIDGE_RT_LOCKED(sc));
2646
2647 BRIDGE_RTHASH_WRITER_REMOVE(brt);
2648 BRIDGE_RTLIST_WRITER_REMOVE(brt);
2649 sc->sc_brtcnt--;
2650 }
2651
2652 /*
2653 * bridge_rtnode_destroy:
2654 *
2655 * Destroy a bridge rtnode.
2656 */
2657 static void
2658 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2659 {
2660
2661 PSLIST_ENTRY_DESTROY(brt, brt_list);
2662 PSLIST_ENTRY_DESTROY(brt, brt_hash);
2663 pool_put(&bridge_rtnode_pool, brt);
2664 }
2665
2666 extern pfil_head_t *inet_pfil_hook; /* XXX */
2667 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2668
2669 /*
2670 * Send bridge packets through IPF if they are one of the types IPF can deal
2671 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without
2672 * question.)
2673 */
2674 static int
2675 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2676 {
2677 int snap, error;
2678 struct ether_header *eh1, eh2;
2679 struct llc llc1;
2680 uint16_t ether_type;
2681
2682 snap = 0;
2683 	error = -1;	/* Default error, returned unless everything checks out */
2684 eh1 = mtod(*mp, struct ether_header *);
2685 ether_type = ntohs(eh1->ether_type);
2686
2687 /*
2688 * Check for SNAP/LLC.
2689 */
2690 if (ether_type < ETHERMTU) {
2691 struct llc *llc2 = (struct llc *)(eh1 + 1);
2692
2693 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2694 llc2->llc_dsap == LLC_SNAP_LSAP &&
2695 llc2->llc_ssap == LLC_SNAP_LSAP &&
2696 llc2->llc_control == LLC_UI) {
2697 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2698 snap = 1;
2699 }
2700 }
2701
2702 /*
2703 * If we're trying to filter bridge traffic, don't look at anything
2704 * other than IP and ARP traffic. If the filter doesn't understand
2705 * IPv6, don't allow IPv6 through the bridge either. This is lame
2706 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2707 * but of course we don't have an AppleTalk filter to begin with.
2708 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2709 * ARP traffic.)
2710 */
2711 switch (ether_type) {
2712 case ETHERTYPE_ARP:
2713 case ETHERTYPE_REVARP:
2714 return 0; /* Automatically pass */
2715 case ETHERTYPE_IP:
2716 # ifdef INET6
2717 case ETHERTYPE_IPV6:
2718 # endif /* INET6 */
2719 break;
2720 default:
2721 goto bad;
2722 }
2723
2724 /* Strip off the Ethernet header and keep a copy. */
2725 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2726 m_adj(*mp, ETHER_HDR_LEN);
2727
2728 /* Strip off snap header, if present */
2729 if (snap) {
2730 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2731 m_adj(*mp, sizeof(struct llc));
2732 }
2733
2734 /*
2735 * Check basic packet sanity and run IPF through pfil.
2736 */
2737 KASSERT(!cpu_intr_p());
2738 switch (ether_type)
2739 {
2740 case ETHERTYPE_IP :
2741 error = bridge_ip_checkbasic(mp);
2742 if (error == 0)
2743 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2744 break;
2745 # ifdef INET6
2746 case ETHERTYPE_IPV6 :
2747 error = bridge_ip6_checkbasic(mp);
2748 if (error == 0)
2749 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2750 break;
2751 # endif
2752 default :
2753 error = 0;
2754 break;
2755 }
2756
2757 if (*mp == NULL)
2758 return error;
2759 if (error != 0)
2760 goto bad;
2761
2762 error = -1;
2763
2764 /*
2765 * Finally, put everything back the way it was and return
2766 */
2767 if (snap) {
2768 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2769 if (*mp == NULL)
2770 return error;
2771 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2772 }
2773
2774 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2775 if (*mp == NULL)
2776 return error;
2777 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2778
2779 return 0;
2780
2781 bad:
2782 m_freem(*mp);
2783 *mp = NULL;
2784 return error;
2785 }
2786
2787 /*
2788  * Perform basic checks on header size, since
2789  * IPF assumes ip_input has already done them
2790  * for it.  Cut-and-pasted from ip_input.c.
2791 * Given how simple the IPv6 version is,
2792 * does the IPv4 version really need to be
2793 * this complicated?
2794 *
2795 * XXX Should we update ipstat here, or not?
2796 * XXX Right now we update ipstat but not
2797 * XXX csum_counter.
2798 */
2799 static int
2800 bridge_ip_checkbasic(struct mbuf **mp)
2801 {
2802 struct mbuf *m = *mp;
2803 struct ip *ip;
2804 int len, hlen;
2805
2806 if (*mp == NULL)
2807 return -1;
2808
2809 if (M_GET_ALIGNED_HDR(&m, struct ip, true) != 0) {
2810 /* XXXJRT new stat, please */
2811 ip_statinc(IP_STAT_TOOSMALL);
2812 goto bad;
2813 }
2814 ip = mtod(m, struct ip *);
2815 if (ip == NULL) goto bad;
2816
2817 if (ip->ip_v != IPVERSION) {
2818 ip_statinc(IP_STAT_BADVERS);
2819 goto bad;
2820 }
2821 hlen = ip->ip_hl << 2;
2822 if (hlen < sizeof(struct ip)) { /* minimum header length */
2823 ip_statinc(IP_STAT_BADHLEN);
2824 goto bad;
2825 }
2826 if (hlen > m->m_len) {
2827 if ((m = m_pullup(m, hlen)) == 0) {
2828 ip_statinc(IP_STAT_BADHLEN);
2829 goto bad;
2830 }
2831 ip = mtod(m, struct ip *);
2832 if (ip == NULL) goto bad;
2833 }
2834
2835 switch (m->m_pkthdr.csum_flags &
2836 ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2837 M_CSUM_IPv4_BAD)) {
2838 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2839 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2840 goto bad;
2841
2842 case M_CSUM_IPv4:
2843 /* Checksum was okay. */
2844 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2845 break;
2846
2847 default:
2848 /* Must compute it ourselves. */
2849 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2850 if (in_cksum(m, hlen) != 0)
2851 goto bad;
2852 break;
2853 }
2854
2855 /* Retrieve the packet length. */
2856 len = ntohs(ip->ip_len);
2857
2858 /*
2859 * Check for additional length bogosity
2860 */
2861 if (len < hlen) {
2862 ip_statinc(IP_STAT_BADLEN);
2863 goto bad;
2864 }
2865
2866 /*
2867 * Check that the amount of data in the buffers
2868  * is at least as much as the IP header would have us expect.
2869 * Drop packet if shorter than we expect.
2870 */
2871 if (m->m_pkthdr.len < len) {
2872 ip_statinc(IP_STAT_TOOSHORT);
2873 goto bad;
2874 }
2875
2876 /* Checks out, proceed */
2877 *mp = m;
2878 return 0;
2879
2880 bad:
2881 *mp = m;
2882 return -1;
2883 }
2884
2885 # ifdef INET6
2886 /*
2887 * Same as above, but for IPv6.
2888 * Cut-and-pasted from ip6_input.c.
2889 * XXX Should we update ip6stat, or not?
2890 */
2891 static int
2892 bridge_ip6_checkbasic(struct mbuf **mp)
2893 {
2894 struct mbuf *m = *mp;
2895 struct ip6_hdr *ip6;
2896
2897 /*
2898 * If the IPv6 header is not aligned, slurp it up into a new
2899 * mbuf with space for link headers, in the event we forward
2900 * it. Otherwise, if it is aligned, make sure the entire base
2901 * IPv6 header is in the first mbuf of the chain.
2902 */
2903 if (M_GET_ALIGNED_HDR(&m, struct ip6_hdr, true) != 0) {
2904 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2905 /* XXXJRT new stat, please */
2906 ip6_statinc(IP6_STAT_TOOSMALL);
2907 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2908 goto bad;
2909 }
2910
2911 ip6 = mtod(m, struct ip6_hdr *);
2912
2913 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2914 ip6_statinc(IP6_STAT_BADVERS);
2915 in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2916 goto bad;
2917 }
2918
2919 /* Checks out, proceed */
2920 *mp = m;
2921 return 0;
2922
2923 bad:
2924 *mp = m;
2925 return -1;
2926 }
2927 # endif /* INET6 */
2928