1 /* $NetBSD: if_bridge.c,v 1.171 2020/04/27 20:46:01 jdolecek Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79 * consider heterogeneous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.171 2020/04/27 20:46:01 jdolecek Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #include "opt_net_mpsafe.h"
89 #endif /* _KERNEL_OPT */
90
91 #include <sys/param.h>
92 #include <sys/kernel.h>
93 #include <sys/mbuf.h>
94 #include <sys/queue.h>
95 #include <sys/socket.h>
96 #include <sys/socketvar.h> /* for softnet_lock */
97 #include <sys/sockio.h>
98 #include <sys/systm.h>
99 #include <sys/proc.h>
100 #include <sys/pool.h>
101 #include <sys/kauth.h>
102 #include <sys/cpu.h>
103 #include <sys/cprng.h>
104 #include <sys/mutex.h>
105 #include <sys/kmem.h>
106
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115 #include <net/ether_sw_offload.h>
116
117 #if defined(BRIDGE_IPF)
118 /* Used for bridge_ip[6]_checkbasic */
119 #include <netinet/in.h>
120 #include <netinet/in_systm.h>
121 #include <netinet/ip.h>
122 #include <netinet/ip_var.h>
123 #include <netinet/ip_private.h> /* XXX */
124
125 #include <netinet/ip6.h>
126 #include <netinet6/in6_var.h>
127 #include <netinet6/ip6_var.h>
128 #include <netinet6/ip6_private.h> /* XXX */
129 #endif /* BRIDGE_IPF */
130
131 /*
132 * Size of the route hash table. Must be a power of two.
133 */
134 #ifndef BRIDGE_RTHASH_SIZE
135 #define BRIDGE_RTHASH_SIZE 1024
136 #endif
137
138 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
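/*
 * Since BRIDGE_RTHASH_SIZE is a power of two, a hash bucket index can
 * be derived with a simple mask, e.g. bucket = hash & BRIDGE_RTHASH_MASK.
 */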
139
140 #include "carp.h"
141 #if NCARP > 0
142 #include <netinet/in.h>
143 #include <netinet/in_var.h>
144 #include <netinet/ip_carp.h>
145 #endif
146
147 #include "ioconf.h"
148
149 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
150 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
151 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
152
153 /*
154 * Maximum number of addresses to cache.
155 */
156 #ifndef BRIDGE_RTABLE_MAX
157 #define BRIDGE_RTABLE_MAX 100
158 #endif
159
160 /*
161 * Spanning tree defaults.
162 */
163 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
164 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
165 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
166 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
167 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
168 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
169 #define BSTP_DEFAULT_PATH_COST 55
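/*
 * The spanning tree timer defaults above are kept in 1/256-second
 * units (hence the "* 256" scaling); the BRDGGHT/BRDGSHT and related
 * ioctl handlers below convert to and from whole seconds with ">> 8"
 * and "<< 8".  BSTP_DEFAULT_MAX_AGE, for example, corresponds to 20
 * seconds.
 */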
170
171 /*
172 * Timeout (in seconds) for entries learned dynamically.
173 */
174 #ifndef BRIDGE_RTABLE_TIMEOUT
175 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
176 #endif
177
178 /*
179 * Number of seconds between walks of the route list.
180 */
181 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
182 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
183 #endif
184
185 #define BRIDGE_RT_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_lock)
186 #define BRIDGE_RT_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_lock)
187 #define BRIDGE_RT_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_lock)
188
189 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
190 pserialize_perform((_sc)->sc_rtlist_psz)
191
192 #define BRIDGE_RT_RENTER(__s) do { __s = pserialize_read_enter(); } while (0)
193 #define BRIDGE_RT_REXIT(__s) do { pserialize_read_exit(__s); } while (0)
194
195 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc) \
196 PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist), \
197 struct bridge_rtnode, brt_list)
198 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc) \
199 PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist), \
200 struct bridge_rtnode, brt_list)
201 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt) \
202 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
203 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt) \
204 PSLIST_WRITER_REMOVE((_brt), brt_list)
205
206 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash) \
207 PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
208 struct bridge_rtnode, brt_hash)
209 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash) \
210 PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)], \
211 struct bridge_rtnode, brt_hash)
212 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt) \
213 PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
214 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new) \
215 PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
216 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt) \
217 PSLIST_WRITER_REMOVE((_brt), brt_hash)
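/*
 * Locking sketch for the route list/hash handled by the macros above:
 * readers walk the pslists inside a pserialize read section
 * (BRIDGE_RT_RENTER/BRIDGE_RT_REXIT), while writers hold
 * sc_rtlist_lock (BRIDGE_RT_LOCK) and run BRIDGE_RT_PSZ_PERFORM
 * before a removed node is freed, so that no reader can still be
 * referencing it.
 */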
218
219 #ifdef NET_MPSAFE
220 #define DECLARE_LOCK_VARIABLE
221 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
222 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
223 #else
224 #define DECLARE_LOCK_VARIABLE int __s
225 #define ACQUIRE_GLOBAL_LOCKS() do { \
226 KERNEL_LOCK(1, NULL); \
227 mutex_enter(softnet_lock); \
228 __s = splsoftnet(); \
229 } while (0)
230 #define RELEASE_GLOBAL_LOCKS() do { \
231 splx(__s); \
232 mutex_exit(softnet_lock); \
233 KERNEL_UNLOCK_ONE(NULL); \
234 } while (0)
235 #endif
236
237 struct psref_class *bridge_psref_class __read_mostly;
238
239 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
240
241 static struct pool bridge_rtnode_pool;
242
243 static int bridge_clone_create(struct if_clone *, int);
244 static int bridge_clone_destroy(struct ifnet *);
245
246 static int bridge_ioctl(struct ifnet *, u_long, void *);
247 static int bridge_init(struct ifnet *);
248 static void bridge_stop(struct ifnet *, int);
249 static void bridge_start(struct ifnet *);
250
251 static void bridge_input(struct ifnet *, struct mbuf *);
252 static void bridge_forward(struct bridge_softc *, struct mbuf *);
253
254 static void bridge_timer(void *);
255
256 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
257 struct mbuf *);
258
259 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
260 struct ifnet *, int, uint8_t);
261 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
262 static void bridge_rttrim(struct bridge_softc *);
263 static void bridge_rtage(struct bridge_softc *);
264 static void bridge_rtage_work(struct work *, void *);
265 static void bridge_rtflush(struct bridge_softc *, int);
266 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
267 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
268
269 static void bridge_rtable_init(struct bridge_softc *);
270 static void bridge_rtable_fini(struct bridge_softc *);
271
272 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
273 const uint8_t *);
274 static int bridge_rtnode_insert(struct bridge_softc *,
275 struct bridge_rtnode *);
276 static void bridge_rtnode_remove(struct bridge_softc *,
277 struct bridge_rtnode *);
278 static void bridge_rtnode_destroy(struct bridge_rtnode *);
279
280 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
281 const char *name,
282 struct psref *);
283 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
284 struct ifnet *ifp,
285 struct psref *);
286 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
287 struct psref *);
288 static void bridge_delete_member(struct bridge_softc *,
289 struct bridge_iflist *);
290 static void bridge_acquire_member(struct bridge_softc *sc,
291 struct bridge_iflist *,
292 struct psref *);
293
294 static int bridge_ioctl_add(struct bridge_softc *, void *);
295 static int bridge_ioctl_del(struct bridge_softc *, void *);
296 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
297 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
298 static int bridge_ioctl_scache(struct bridge_softc *, void *);
299 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
300 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
301 static int bridge_ioctl_rts(struct bridge_softc *, void *);
302 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
303 static int bridge_ioctl_sto(struct bridge_softc *, void *);
304 static int bridge_ioctl_gto(struct bridge_softc *, void *);
305 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
306 static int bridge_ioctl_flush(struct bridge_softc *, void *);
307 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
308 static int bridge_ioctl_spri(struct bridge_softc *, void *);
309 static int bridge_ioctl_ght(struct bridge_softc *, void *);
310 static int bridge_ioctl_sht(struct bridge_softc *, void *);
311 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
312 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
313 static int bridge_ioctl_gma(struct bridge_softc *, void *);
314 static int bridge_ioctl_sma(struct bridge_softc *, void *);
315 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
316 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
317 #if defined(BRIDGE_IPF)
318 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
319 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
320 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
321 static int bridge_ip_checkbasic(struct mbuf **mp);
322 # ifdef INET6
323 static int bridge_ip6_checkbasic(struct mbuf **mp);
324 # endif /* INET6 */
325 #endif /* BRIDGE_IPF */
326
327 struct bridge_control {
328 int (*bc_func)(struct bridge_softc *, void *);
329 int bc_argsize;
330 int bc_flags;
331 };
332
333 #define BC_F_COPYIN 0x01 /* copy arguments in */
334 #define BC_F_COPYOUT 0x02 /* copy arguments out */
335 #define BC_F_SUSER 0x04 /* do super-user check */
336 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
337 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
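/*
 * Each entry in bridge_control_table below combines these flags, e.g.
 * BC_F_COPYIN|BC_F_SUSER for privileged set operations that take a
 * struct ifbreq from userland.  The newer BRDGGIFS/BRDGRTS entries use
 * BC_F_XLATEIN|BC_F_XLATEOUT instead: only the length and buffer
 * pointer of the argument are translated in bridge_ioctl(), and the
 * handler copies the records out by itself.
 */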
338
339 static const struct bridge_control bridge_control_table[] = {
340 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
341 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
342
343 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
344 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
345
346 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
347 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
348
349 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
350 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
351
352 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
353
354 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
355 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
356
357 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
358
359 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
360
361 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
362 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
363
364 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
365 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
366
367 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
368 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
369
370 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
371 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
372
373 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
374
375 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
376 #if defined(BRIDGE_IPF)
377 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
378 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
379 #endif /* BRIDGE_IPF */
380 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
381 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
382 };
383
384 static const int bridge_control_table_size = __arraycount(bridge_control_table);
385
386 static struct if_clone bridge_cloner =
387 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
388
389 /*
390 * bridgeattach:
391 *
392 * Pseudo-device attach routine.
393 */
394 void
395 bridgeattach(int n)
396 {
397
398 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
399 0, 0, 0, "brtpl", NULL, IPL_NET);
400
401 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
402
403 if_clone_attach(&bridge_cloner);
404 }
405
406 /*
407 * bridge_clone_create:
408 *
409 * Create a new bridge instance.
410 */
411 static int
412 bridge_clone_create(struct if_clone *ifc, int unit)
413 {
414 struct bridge_softc *sc;
415 struct ifnet *ifp;
416 int error;
417
418 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
419 ifp = &sc->sc_if;
420
421 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
422 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
423 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
424 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
425 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
426 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
427 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
428 sc->sc_filter_flags = 0;
429
430 /* Initialize our routing table. */
431 bridge_rtable_init(sc);
432
433 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
434 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
435 if (error)
436 panic("%s: workqueue_create %d\n", __func__, error);
437
438 callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
439 callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
440
441 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
442 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
443 sc->sc_iflist_psref.bip_psz = pserialize_create();
444
445 if_initname(ifp, ifc->ifc_name, unit);
446 ifp->if_softc = sc;
447 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
448 #ifdef NET_MPSAFE
449 ifp->if_extflags |= IFEF_MPSAFE;
450 #endif
451 ifp->if_mtu = ETHERMTU;
452 ifp->if_ioctl = bridge_ioctl;
453 ifp->if_output = bridge_output;
454 ifp->if_start = bridge_start;
455 ifp->if_stop = bridge_stop;
456 ifp->if_init = bridge_init;
457 ifp->if_type = IFT_BRIDGE;
458 ifp->if_addrlen = 0;
459 ifp->if_dlt = DLT_EN10MB;
460 ifp->if_hdrlen = ETHER_HDR_LEN;
461
462 error = if_initialize(ifp);
463 if (error != 0) {
464 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
465 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
466 callout_destroy(&sc->sc_brcallout);
467 callout_destroy(&sc->sc_bstpcallout);
468 workqueue_destroy(sc->sc_rtage_wq);
469 bridge_rtable_fini(sc);
470 kmem_free(sc, sizeof(*sc));
471
472 return error;
473 }
474 if_alloc_sadl(ifp);
475 if_register(ifp);
476
477 return 0;
478 }
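/*
 * Usage note: bridge instances are created through the cloner
 * registered in bridgeattach(), typically from userland with
 * something like "ifconfig bridge0 create", which ends up here in
 * bridge_clone_create().
 */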
479
480 /*
481 * bridge_clone_destroy:
482 *
483 * Destroy a bridge instance.
484 */
485 static int
486 bridge_clone_destroy(struct ifnet *ifp)
487 {
488 struct bridge_softc *sc = ifp->if_softc;
489 struct bridge_iflist *bif;
490
491 if ((ifp->if_flags & IFF_RUNNING) != 0)
492 bridge_stop(ifp, 1);
493
494 BRIDGE_LOCK(sc);
495 for (;;) {
496 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
497 bif_next);
498 if (bif == NULL)
499 break;
500 bridge_delete_member(sc, bif);
501 }
502 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
503 BRIDGE_UNLOCK(sc);
504
505 if_detach(ifp);
506
507 /* Tear down the routing table. */
508 bridge_rtable_fini(sc);
509
510 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
511 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
512 callout_destroy(&sc->sc_brcallout);
513 callout_destroy(&sc->sc_bstpcallout);
514 workqueue_destroy(sc->sc_rtage_wq);
515 kmem_free(sc, sizeof(*sc));
516
517 return 0;
518 }
519
520 /*
521 * bridge_ioctl:
522 *
523 * Handle a control request from the operator.
524 */
525 static int
526 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
527 {
528 struct bridge_softc *sc = ifp->if_softc;
529 struct lwp *l = curlwp; /* XXX */
530 union {
531 struct ifbreq ifbreq;
532 struct ifbifconf ifbifconf;
533 struct ifbareq ifbareq;
534 struct ifbaconf ifbaconf;
535 struct ifbrparam ifbrparam;
536 } args;
537 struct ifdrv *ifd = (struct ifdrv *) data;
538 const struct bridge_control *bc = NULL; /* XXXGCC */
539 int s, error = 0;
540
541 /* Authorize command before calling splsoftnet(). */
542 switch (cmd) {
543 case SIOCGDRVSPEC:
544 case SIOCSDRVSPEC:
545 if (ifd->ifd_cmd >= bridge_control_table_size
546 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
547 error = EINVAL;
548 return error;
549 }
550
551 /* We only care about BC_F_SUSER at this point. */
552 if ((bc->bc_flags & BC_F_SUSER) == 0)
553 break;
554
555 error = kauth_authorize_network(l->l_cred,
556 KAUTH_NETWORK_INTERFACE_BRIDGE,
557 cmd == SIOCGDRVSPEC ?
558 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
559 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
560 ifd, NULL, NULL);
561 if (error)
562 return error;
563
564 break;
565 }
566
567 s = splsoftnet();
568
569 switch (cmd) {
570 case SIOCGDRVSPEC:
571 case SIOCSDRVSPEC:
572 KASSERT(bc != NULL);
573 if (cmd == SIOCGDRVSPEC &&
574 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
575 error = EINVAL;
576 break;
577 }
578 else if (cmd == SIOCSDRVSPEC &&
579 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
580 error = EINVAL;
581 break;
582 }
583
584 /* BC_F_SUSER is checked above, before splsoftnet(). */
585
586 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
587 && (ifd->ifd_len != bc->bc_argsize
588 || ifd->ifd_len > sizeof(args))) {
589 error = EINVAL;
590 break;
591 }
592
593 memset(&args, 0, sizeof(args));
594 if (bc->bc_flags & BC_F_COPYIN) {
595 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
596 if (error)
597 break;
598 } else if (bc->bc_flags & BC_F_XLATEIN) {
599 args.ifbifconf.ifbic_len = ifd->ifd_len;
600 args.ifbifconf.ifbic_buf = ifd->ifd_data;
601 }
602
603 error = (*bc->bc_func)(sc, &args);
604 if (error)
605 break;
606
607 if (bc->bc_flags & BC_F_COPYOUT) {
608 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
609 } else if (bc->bc_flags & BC_F_XLATEOUT) {
610 ifd->ifd_len = args.ifbifconf.ifbic_len;
611 ifd->ifd_data = args.ifbifconf.ifbic_buf;
612 }
613 break;
614
615 case SIOCSIFFLAGS:
616 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
617 break;
618 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
619 case IFF_RUNNING:
620 /*
621 * If interface is marked down and it is running,
622 * then stop and disable it.
623 */
624 (*ifp->if_stop)(ifp, 1);
625 break;
626 case IFF_UP:
627 /*
628 * If interface is marked up and it is stopped, then
629 * start it.
630 */
631 error = (*ifp->if_init)(ifp);
632 break;
633 default:
634 break;
635 }
636 break;
637
638 case SIOCSIFMTU:
639 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
640 error = 0;
641 break;
642
643 default:
644 error = ifioctl_common(ifp, cmd, data);
645 break;
646 }
647
648 splx(s);
649
650 return error;
651 }
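/*
 * Illustrative sketch of how userland drives the SIOCGDRVSPEC/
 * SIOCSDRVSPEC path above (roughly what brconfig(8) does); the
 * variable names and interface names are hypothetical:
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(req.ifbr_ifsname, "wm0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(sock, SIOCSDRVSPEC, &ifd);	(add wm0 to bridge0)
 */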
652
653 /*
654 * bridge_lookup_member:
655 *
656 * Lookup a bridge member interface.
657 */
658 static struct bridge_iflist *
659 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
660 {
661 struct bridge_iflist *bif;
662 struct ifnet *ifp;
663 int s;
664
665 BRIDGE_PSZ_RENTER(s);
666
667 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
668 ifp = bif->bif_ifp;
669 if (strcmp(ifp->if_xname, name) == 0)
670 break;
671 }
672 if (bif != NULL)
673 bridge_acquire_member(sc, bif, psref);
674
675 BRIDGE_PSZ_REXIT(s);
676
677 return bif;
678 }
679
680 /*
681 * bridge_lookup_member_if:
682 *
683 * Lookup a bridge member interface by ifnet*.
684 */
685 static struct bridge_iflist *
686 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
687 struct psref *psref)
688 {
689 struct bridge_iflist *bif;
690 int s;
691
692 BRIDGE_PSZ_RENTER(s);
693
694 bif = member_ifp->if_bridgeif;
695 if (bif != NULL) {
696 psref_acquire(psref, &bif->bif_psref,
697 bridge_psref_class);
698 }
699
700 BRIDGE_PSZ_REXIT(s);
701
702 return bif;
703 }
704
705 static void
706 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
707 struct psref *psref)
708 {
709
710 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
711 }
712
713 /*
714 * bridge_release_member:
715 *
716 * Release the specified member interface.
717 */
718 static void
719 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
720 struct psref *psref)
721 {
722
723 psref_release(psref, &bif->bif_psref, bridge_psref_class);
724 }
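/*
 * Reference discipline: bridge_lookup_member() and
 * bridge_lookup_member_if() return a member with a psref acquired
 * inside a pserialize read section.  Every successful lookup must be
 * paired with bridge_release_member(), and the calling LWP is expected
 * to be bound to its CPU for the duration (see the curlwp_bind()/
 * curlwp_bindx() pair in bridge_input()).
 */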
725
726 /*
727 * bridge_delete_member:
728 *
729 * Delete the specified member interface.
730 */
731 static void
732 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
733 {
734 struct ifnet *ifs = bif->bif_ifp;
735
736 KASSERT(BRIDGE_LOCKED(sc));
737
738 ifs->_if_input = ether_input;
739 ifs->if_bridge = NULL;
740 ifs->if_bridgeif = NULL;
741
742 PSLIST_WRITER_REMOVE(bif, bif_next);
743 BRIDGE_PSZ_PERFORM(sc);
744 BRIDGE_UNLOCK(sc);
745
746 switch (ifs->if_type) {
747 case IFT_ETHER:
748 case IFT_L2TP:
749 /*
750 * Take the interface out of promiscuous mode.
751 * Don't call it while holding a spin lock.
752 */
753 (void) ifpromisc(ifs, 0);
754 IFNET_LOCK(ifs);
755 (void) ether_disable_vlan_mtu(ifs);
756 IFNET_UNLOCK(ifs);
757 break;
758 default:
759 #ifdef DIAGNOSTIC
760 panic("%s: impossible", __func__);
761 #endif
762 break;
763 }
764
765 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
766
767 PSLIST_ENTRY_DESTROY(bif, bif_next);
768 kmem_free(bif, sizeof(*bif));
769
770 BRIDGE_LOCK(sc);
771 }
772
773 /*
774 * bridge_calc_csum_flags:
775 *
776 * Calculate the logical (bitwise) AND of the csum flags that every member interface supports.
777 */
778 void
779 bridge_calc_csum_flags(struct bridge_softc *sc)
780 {
781 struct bridge_iflist *bif;
782 struct ifnet *ifs;
783 int flags = ~0;
784
785 BRIDGE_LOCK(sc);
786 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
787 ifs = bif->bif_ifp;
788 flags &= ifs->if_csum_flags_tx;
789 }
790 sc->sc_csum_flags_tx = flags;
791 BRIDGE_UNLOCK(sc);
792 }
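/*
 * Example: if one member advertises only M_CSUM_IPv4 in
 * if_csum_flags_tx and another advertises M_CSUM_IPv4|M_CSUM_TCPv4,
 * the bitwise AND leaves sc_csum_flags_tx = M_CSUM_IPv4; the bridge
 * only offloads what every member can handle, and anything else is
 * done in software via ether_sw_offload_tx() (see bridge_output()).
 */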
793
794 static int
795 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
796 {
797 struct ifbreq *req = arg;
798 struct bridge_iflist *bif = NULL;
799 struct ifnet *ifs;
800 int error = 0;
801 struct psref psref;
802
803 ifs = if_get(req->ifbr_ifsname, &psref);
804 if (ifs == NULL)
805 return ENOENT;
806
807 if (ifs->if_bridge == sc) {
808 error = EEXIST;
809 goto out;
810 }
811
812 if (ifs->if_bridge != NULL) {
813 error = EBUSY;
814 goto out;
815 }
816
817 if (ifs->_if_input != ether_input) {
818 error = EINVAL;
819 goto out;
820 }
821
822 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
823 if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
824 error = EINVAL;
825 goto out;
826 }
827
828 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
829
830 switch (ifs->if_type) {
831 case IFT_ETHER:
832 if (sc->sc_if.if_mtu != ifs->if_mtu) {
833 /* Change MTU of added interface to bridge MTU */
834 struct ifreq ifr;
835 memset(&ifr, 0, sizeof(ifr));
836 ifr.ifr_mtu = sc->sc_if.if_mtu;
837 IFNET_LOCK(ifs);
838 error = ether_ioctl(ifs, SIOCSIFMTU, &ifr);
839 IFNET_UNLOCK(ifs);
840 if (error != 0)
841 goto out;
842 }
843 /* FALLTHROUGH */
844 case IFT_L2TP:
845 IFNET_LOCK(ifs);
846 error = ether_enable_vlan_mtu(ifs);
847 IFNET_UNLOCK(ifs);
848 if (error > 0)
849 goto out;
850 /*
851 * Place the interface into promiscuous mode.
852 */
853 error = ifpromisc(ifs, 1);
854 if (error)
855 goto out;
856 break;
857 default:
858 error = EINVAL;
859 goto out;
860 }
861
862 bif->bif_ifp = ifs;
863 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
864 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
865 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
866 PSLIST_ENTRY_INIT(bif, bif_next);
867 psref_target_init(&bif->bif_psref, bridge_psref_class);
868
869 BRIDGE_LOCK(sc);
870
871 ifs->if_bridge = sc;
872 ifs->if_bridgeif = bif;
873 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
874 ifs->_if_input = bridge_input;
875
876 BRIDGE_UNLOCK(sc);
877
878 bridge_calc_csum_flags(sc);
879
880 if (sc->sc_if.if_flags & IFF_RUNNING)
881 bstp_initialization(sc);
882 else
883 bstp_stop(sc);
884
885 out:
886 if_put(ifs, &psref);
887 if (error) {
888 if (bif != NULL)
889 kmem_free(bif, sizeof(*bif));
890 }
891 return error;
892 }
893
894 static int
895 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
896 {
897 struct ifbreq *req = arg;
898 const char *name = req->ifbr_ifsname;
899 struct bridge_iflist *bif;
900 struct ifnet *ifs;
901
902 BRIDGE_LOCK(sc);
903
904 /*
905 * Don't use bridge_lookup_member. We want to get a member
906 * with bif_refs == 0.
907 */
908 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
909 ifs = bif->bif_ifp;
910 if (strcmp(ifs->if_xname, name) == 0)
911 break;
912 }
913
914 if (bif == NULL) {
915 BRIDGE_UNLOCK(sc);
916 return ENOENT;
917 }
918
919 bridge_delete_member(sc, bif);
920
921 BRIDGE_UNLOCK(sc);
922
923 bridge_rtdelete(sc, ifs);
924 bridge_calc_csum_flags(sc);
925
926 if (sc->sc_if.if_flags & IFF_RUNNING)
927 bstp_initialization(sc);
928
929 return 0;
930 }
931
932 static int
933 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
934 {
935 struct ifbreq *req = arg;
936 struct bridge_iflist *bif;
937 struct psref psref;
938
939 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
940 if (bif == NULL)
941 return ENOENT;
942
943 req->ifbr_ifsflags = bif->bif_flags;
944 req->ifbr_state = bif->bif_state;
945 req->ifbr_priority = bif->bif_priority;
946 req->ifbr_path_cost = bif->bif_path_cost;
947 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
948
949 bridge_release_member(sc, bif, &psref);
950
951 return 0;
952 }
953
954 static int
955 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
956 {
957 struct ifbreq *req = arg;
958 struct bridge_iflist *bif;
959 struct psref psref;
960
961 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
962 if (bif == NULL)
963 return ENOENT;
964
965 if (req->ifbr_ifsflags & IFBIF_STP) {
966 switch (bif->bif_ifp->if_type) {
967 case IFT_ETHER:
968 case IFT_L2TP:
969 /* These can do spanning tree. */
970 break;
971
972 default:
973 /* Nothing else can. */
974 bridge_release_member(sc, bif, &psref);
975 return EINVAL;
976 }
977 }
978
979 bif->bif_flags = req->ifbr_ifsflags;
980
981 bridge_release_member(sc, bif, &psref);
982
983 if (sc->sc_if.if_flags & IFF_RUNNING)
984 bstp_initialization(sc);
985
986 return 0;
987 }
988
989 static int
990 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
991 {
992 struct ifbrparam *param = arg;
993
994 sc->sc_brtmax = param->ifbrp_csize;
995 bridge_rttrim(sc);
996
997 return 0;
998 }
999
1000 static int
1001 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1002 {
1003 struct ifbrparam *param = arg;
1004
1005 param->ifbrp_csize = sc->sc_brtmax;
1006
1007 return 0;
1008 }
1009
1010 static int
1011 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1012 {
1013 struct ifbifconf *bifc = arg;
1014 struct bridge_iflist *bif;
1015 struct ifbreq *breqs;
1016 int i, count, error = 0;
1017
1018 retry:
1019 BRIDGE_LOCK(sc);
1020 count = 0;
1021 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1022 count++;
1023 BRIDGE_UNLOCK(sc);
1024
1025 if (count == 0) {
1026 bifc->ifbic_len = 0;
1027 return 0;
1028 }
1029
1030 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1031 /* Tell the caller that a larger buffer is needed */
1032 bifc->ifbic_len = sizeof(*breqs) * count;
1033 return 0;
1034 }
1035
1036 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1037
1038 BRIDGE_LOCK(sc);
1039
1040 i = 0;
1041 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1042 i++;
1043 if (i > count) {
1044 /*
1045 * The number of members has been increased.
1046 * We need more memory!
1047 */
1048 BRIDGE_UNLOCK(sc);
1049 kmem_free(breqs, sizeof(*breqs) * count);
1050 goto retry;
1051 }
1052
1053 i = 0;
1054 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1055 struct ifbreq *breq = &breqs[i++];
1056 memset(breq, 0, sizeof(*breq));
1057
1058 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1059 sizeof(breq->ifbr_ifsname));
1060 breq->ifbr_ifsflags = bif->bif_flags;
1061 breq->ifbr_state = bif->bif_state;
1062 breq->ifbr_priority = bif->bif_priority;
1063 breq->ifbr_path_cost = bif->bif_path_cost;
1064 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1065 }
1066
1067 /* Don't call copyout while holding the mutex */
1068 BRIDGE_UNLOCK(sc);
1069
1070 for (i = 0; i < count; i++) {
1071 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1072 if (error)
1073 break;
1074 }
1075 bifc->ifbic_len = sizeof(*breqs) * i;
1076
1077 kmem_free(breqs, sizeof(*breqs) * count);
1078
1079 return error;
1080 }
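/*
 * The handler above implements a two-pass protocol: a caller passing
 * too small a buffer (or a zero length) simply gets the required size
 * back in ifbic_len, allocates that much space, and issues BRDGGIFS
 * again to fetch the member list.
 */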
1081
1082 static int
1083 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1084 {
1085 struct ifbaconf *bac = arg;
1086 struct bridge_rtnode *brt;
1087 struct ifbareq bareq;
1088 int count = 0, error = 0, len;
1089
1090 if (bac->ifbac_len == 0)
1091 return 0;
1092
1093 BRIDGE_RT_LOCK(sc);
1094
1095 /* The passed buffer is too small; report the required size. */
1096 if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1097 count = sc->sc_brtcnt;
1098 goto out;
1099 }
1100
1101 len = bac->ifbac_len;
1102 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1103 if (len < sizeof(bareq))
1104 goto out;
1105 memset(&bareq, 0, sizeof(bareq));
1106 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1107 sizeof(bareq.ifba_ifsname));
1108 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1109 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1110 bareq.ifba_expire = brt->brt_expire - time_uptime;
1111 } else
1112 bareq.ifba_expire = 0;
1113 bareq.ifba_flags = brt->brt_flags;
1114
1115 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1116 if (error)
1117 goto out;
1118 count++;
1119 len -= sizeof(bareq);
1120 }
1121 out:
1122 BRIDGE_RT_UNLOCK(sc);
1123
1124 bac->ifbac_len = sizeof(bareq) * count;
1125 return error;
1126 }
1127
1128 static int
1129 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1130 {
1131 struct ifbareq *req = arg;
1132 struct bridge_iflist *bif;
1133 int error;
1134 struct psref psref;
1135
1136 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1137 if (bif == NULL)
1138 return ENOENT;
1139
1140 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1141 req->ifba_flags);
1142
1143 bridge_release_member(sc, bif, &psref);
1144
1145 return error;
1146 }
1147
1148 static int
1149 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1150 {
1151 struct ifbrparam *param = arg;
1152
1153 sc->sc_brttimeout = param->ifbrp_ctime;
1154
1155 return 0;
1156 }
1157
1158 static int
1159 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1160 {
1161 struct ifbrparam *param = arg;
1162
1163 param->ifbrp_ctime = sc->sc_brttimeout;
1164
1165 return 0;
1166 }
1167
1168 static int
1169 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1170 {
1171 struct ifbareq *req = arg;
1172
1173 return (bridge_rtdaddr(sc, req->ifba_dst));
1174 }
1175
1176 static int
1177 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1178 {
1179 struct ifbreq *req = arg;
1180
1181 bridge_rtflush(sc, req->ifbr_ifsflags);
1182
1183 return 0;
1184 }
1185
1186 static int
1187 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1188 {
1189 struct ifbrparam *param = arg;
1190
1191 param->ifbrp_prio = sc->sc_bridge_priority;
1192
1193 return 0;
1194 }
1195
1196 static int
1197 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1198 {
1199 struct ifbrparam *param = arg;
1200
1201 sc->sc_bridge_priority = param->ifbrp_prio;
1202
1203 if (sc->sc_if.if_flags & IFF_RUNNING)
1204 bstp_initialization(sc);
1205
1206 return 0;
1207 }
1208
1209 static int
1210 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1211 {
1212 struct ifbrparam *param = arg;
1213
1214 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1215
1216 return 0;
1217 }
1218
1219 static int
1220 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1221 {
1222 struct ifbrparam *param = arg;
1223
1224 if (param->ifbrp_hellotime == 0)
1225 return EINVAL;
1226 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1227
1228 if (sc->sc_if.if_flags & IFF_RUNNING)
1229 bstp_initialization(sc);
1230
1231 return 0;
1232 }
1233
1234 static int
1235 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1236 {
1237 struct ifbrparam *param = arg;
1238
1239 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1240
1241 return 0;
1242 }
1243
1244 static int
1245 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1246 {
1247 struct ifbrparam *param = arg;
1248
1249 if (param->ifbrp_fwddelay == 0)
1250 return EINVAL;
1251 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1252
1253 if (sc->sc_if.if_flags & IFF_RUNNING)
1254 bstp_initialization(sc);
1255
1256 return 0;
1257 }
1258
1259 static int
1260 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1261 {
1262 struct ifbrparam *param = arg;
1263
1264 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1265
1266 return 0;
1267 }
1268
1269 static int
1270 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1271 {
1272 struct ifbrparam *param = arg;
1273
1274 if (param->ifbrp_maxage == 0)
1275 return EINVAL;
1276 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1277
1278 if (sc->sc_if.if_flags & IFF_RUNNING)
1279 bstp_initialization(sc);
1280
1281 return 0;
1282 }
1283
1284 static int
1285 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1286 {
1287 struct ifbreq *req = arg;
1288 struct bridge_iflist *bif;
1289 struct psref psref;
1290
1291 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1292 if (bif == NULL)
1293 return ENOENT;
1294
1295 bif->bif_priority = req->ifbr_priority;
1296
1297 if (sc->sc_if.if_flags & IFF_RUNNING)
1298 bstp_initialization(sc);
1299
1300 bridge_release_member(sc, bif, &psref);
1301
1302 return 0;
1303 }
1304
1305 #if defined(BRIDGE_IPF)
1306 static int
1307 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1308 {
1309 struct ifbrparam *param = arg;
1310
1311 param->ifbrp_filter = sc->sc_filter_flags;
1312
1313 return 0;
1314 }
1315
1316 static int
1317 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1318 {
1319 struct ifbrparam *param = arg;
1320 uint32_t nflags, oflags;
1321
1322 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1323 return EINVAL;
1324
1325 nflags = param->ifbrp_filter;
1326 oflags = sc->sc_filter_flags;
1327
1328 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1329 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1330 sc->sc_if.if_pfil);
1331 }
1332 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1333 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1334 sc->sc_if.if_pfil);
1335 }
1336
1337 sc->sc_filter_flags = nflags;
1338
1339 return 0;
1340 }
1341 #endif /* BRIDGE_IPF */
1342
1343 static int
1344 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1345 {
1346 struct ifbreq *req = arg;
1347 struct bridge_iflist *bif;
1348 struct psref psref;
1349
1350 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1351 if (bif == NULL)
1352 return ENOENT;
1353
1354 bif->bif_path_cost = req->ifbr_path_cost;
1355
1356 if (sc->sc_if.if_flags & IFF_RUNNING)
1357 bstp_initialization(sc);
1358
1359 bridge_release_member(sc, bif, &psref);
1360
1361 return 0;
1362 }
1363
1364 /*
1365 * bridge_ifdetach:
1366 *
1367 * Detach an interface from a bridge. Called when a member
1368 * interface is detaching.
1369 */
1370 void
1371 bridge_ifdetach(struct ifnet *ifp)
1372 {
1373 struct bridge_softc *sc = ifp->if_bridge;
1374 struct ifbreq breq;
1375
1376 /* ioctl_lock should prevent this from happening */
1377 KASSERT(sc != NULL);
1378
1379 memset(&breq, 0, sizeof(breq));
1380 strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1381
1382 (void) bridge_ioctl_del(sc, &breq);
1383 }
1384
1385 /*
1386 * bridge_init:
1387 *
1388 * Initialize a bridge interface.
1389 */
1390 static int
1391 bridge_init(struct ifnet *ifp)
1392 {
1393 struct bridge_softc *sc = ifp->if_softc;
1394
1395 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1396
1397 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1398 bridge_timer, sc);
1399 bstp_initialization(sc);
1400
1401 ifp->if_flags |= IFF_RUNNING;
1402 return 0;
1403 }
1404
1405 /*
1406 * bridge_stop:
1407 *
1408 * Stop the bridge interface.
1409 */
1410 static void
1411 bridge_stop(struct ifnet *ifp, int disable)
1412 {
1413 struct bridge_softc *sc = ifp->if_softc;
1414
1415 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1416 ifp->if_flags &= ~IFF_RUNNING;
1417
1418 callout_halt(&sc->sc_brcallout, NULL);
1419 workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1420 bstp_stop(sc);
1421 bridge_rtflush(sc, IFBF_FLUSHDYN);
1422 }
1423
1424 /*
1425 * bridge_enqueue:
1426 *
1427 * Enqueue a packet on a bridge member interface.
1428 */
1429 void
1430 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1431 int runfilt)
1432 {
1433 int len, error;
1434 short mflags;
1435
1436 if (runfilt) {
1437 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1438 dst_ifp, PFIL_OUT) != 0) {
1439 if (m != NULL)
1440 m_freem(m);
1441 return;
1442 }
1443 if (m == NULL)
1444 return;
1445 }
1446
1447 #ifdef ALTQ
1448 KERNEL_LOCK(1, NULL);
1449 /*
1450 * If ALTQ is enabled on the member interface, do
1451 * classification; the queueing discipline might
1452 * not require classification, but might require
1453 * the address family/header pointer in the pktattr.
1454 */
1455 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1456 /* XXX IFT_ETHER */
1457 altq_etherclassify(&dst_ifp->if_snd, m);
1458 }
1459 KERNEL_UNLOCK_ONE(NULL);
1460 #endif /* ALTQ */
1461
1462 len = m->m_pkthdr.len;
1463 mflags = m->m_flags;
1464
1465 error = if_transmit_lock(dst_ifp, m);
1466 if (error) {
1467 /* mbuf is already freed */
1468 if_statinc(&sc->sc_if, if_oerrors);
1469 return;
1470 }
1471
1472 net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1473 if_statinc_ref(nsr, if_opackets);
1474 if_statadd_ref(nsr, if_obytes, len);
1475 if (mflags & M_MCAST)
1476 if_statinc_ref(nsr, if_omcasts);
1477 IF_STAT_PUTREF(&sc->sc_if);
1478 }
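/*
 * Note on the runfilt argument: the pfil(9) output hooks above are run
 * only when runfilt is non-zero.  bridge_forward() and
 * bridge_broadcast() pass 1, while bridge_output() passes 0.
 */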
1479
1480 /*
1481 * bridge_output:
1482 *
1483 * Send output from a bridge member interface. This
1484 * performs the bridging function for locally originated
1485 * packets.
1486 *
1487 * The mbuf has the Ethernet header already attached. We must
1488 * enqueue or free the mbuf before returning.
1489 */
1490 int
1491 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1492 const struct rtentry *rt)
1493 {
1494 struct ether_header *eh;
1495 struct ifnet *dst_if;
1496 struct bridge_softc *sc;
1497 struct mbuf *n;
1498 int s;
1499
1500 /*
1501 * bridge_output() is called from ether_output(), and the ifp
1502 * argument doesn't point to bridge(4), so don't assert
1503 * IFEF_MPSAFE here.
1504 */
1505
1506 KASSERT(m->m_len >= ETHER_HDR_LEN);
1507
1508 eh = mtod(m, struct ether_header *);
1509 sc = ifp->if_bridge;
1510
1511 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1512 if (memcmp(etherbroadcastaddr,
1513 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1514 m->m_flags |= M_BCAST;
1515 else
1516 m->m_flags |= M_MCAST;
1517 }
1518
1519 /*
1520 * If bridge is down, but the original output interface is up,
1521 * go ahead and send out that interface. Otherwise, the packet
1522 * is dropped below.
1523 */
1524 if (__predict_false(sc == NULL) ||
1525 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1526 dst_if = ifp;
1527 goto unicast_asis;
1528 }
1529
1530 /*
1531 * If the packet is a multicast, or we don't know a better way to
1532 * get there, send to all interfaces.
1533 */
1534 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1535 dst_if = NULL;
1536 else
1537 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1538
1539 /*
1540 * In general, we need to handle TX offload in software before
1541 * enqueueing a packet. However, we can send it as is in the
1542 * cases of unicast via (1) the source interface, or (2) an
1543 * interface which supports the specified offload options.
1544 * For multicast or broadcast, send it as is only if (3) all
1545 * the member interfaces support the specified options.
1546 */
1547
1548 /*
1549 * Unicast via the source interface.
1550 */
1551 if (dst_if == ifp)
1552 goto unicast_asis;
1553
1554 /*
1555 * Unicast via other interface.
1556 */
1557 if (dst_if != NULL) {
1558 KASSERT(m->m_flags & M_PKTHDR);
1559 if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1560 m->m_pkthdr.csum_flags)) {
1561 /*
1562 * Unicast via an interface which supports the
1563 * specified offload options.
1564 */
1565 goto unicast_asis;
1566 }
1567
1568 /*
1569 * Handle TX offload in software. For TSO, a packet is
1570 * split into multiple chunks. Thus, the return value of
1571 * ether_sw_offload_tx() is a list of mbuf packets (linked via
 * m_nextpkt) consisting of those chunks.
1572 */
1573 m = ether_sw_offload_tx(ifp, m);
1574 if (m == NULL)
1575 return 0;
1576
1577 do {
1578 n = m->m_nextpkt;
1579 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1580 m_freem(m);
1581 else
1582 bridge_enqueue(sc, dst_if, m, 0);
1583 m = n;
1584 } while (m != NULL);
1585
1586 return 0;
1587 }
1588
1589 /*
1590 * Multicast or broadcast.
1591 */
1592 if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1593 m->m_pkthdr.csum_flags)) {
1594 /*
1595 * Specified TX offload options are supported by all
1596 * the member interfaces of this bridge.
1597 */
1598 m->m_nextpkt = NULL; /* XXX */
1599 } else {
1600 /*
1601 * Otherwise, handle TX offload in software.
1602 */
1603 m = ether_sw_offload_tx(ifp, m);
1604 if (m == NULL)
1605 return 0;
1606 }
1607
1608 do {
1609 /* XXX Should call bridge_broadcast, but there are locking
1610 * issues which need resolving first. */
1611 struct bridge_iflist *bif;
1612 struct mbuf *mc;
1613 bool used = false;
1614
1615 n = m->m_nextpkt;
1616
1617 BRIDGE_PSZ_RENTER(s);
1618 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1619 struct psref psref;
1620
1621 bridge_acquire_member(sc, bif, &psref);
1622 BRIDGE_PSZ_REXIT(s);
1623
1624 dst_if = bif->bif_ifp;
1625 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1626 goto next;
1627
1628 /*
1629 * If this is not the original output interface,
1630 * and the interface is participating in spanning
1631 * tree, make sure the port is in a state that
1632 * allows forwarding.
1633 */
1634 if (dst_if != ifp &&
1635 (bif->bif_flags & IFBIF_STP) != 0) {
1636 switch (bif->bif_state) {
1637 case BSTP_IFSTATE_BLOCKING:
1638 case BSTP_IFSTATE_LISTENING:
1639 case BSTP_IFSTATE_DISABLED:
1640 goto next;
1641 }
1642 }
1643
1644 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1645 bif_next) == NULL &&
1646 ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1647 dst_if == ifp))
1648 {
1649 used = true;
1650 mc = m;
1651 } else {
1652 mc = m_copypacket(m, M_DONTWAIT);
1653 if (mc == NULL) {
1654 if_statinc(&sc->sc_if, if_oerrors);
1655 goto next;
1656 }
1657 }
1658
1659 bridge_enqueue(sc, dst_if, mc, 0);
1660
1661 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1662 dst_if != ifp)
1663 {
1664 if (PSLIST_READER_NEXT(bif,
1665 struct bridge_iflist, bif_next) == NULL)
1666 {
1667 used = true;
1668 mc = m;
1669 } else {
1670 mc = m_copypacket(m, M_DONTWAIT);
1671 if (mc == NULL) {
1672 if_statinc(&sc->sc_if,
1673 if_oerrors);
1674 goto next;
1675 }
1676 }
1677
1678 m_set_rcvif(mc, dst_if);
1679 mc->m_flags &= ~M_PROMISC;
1680
1681 s = splsoftnet();
1682 KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1683 ether_input(dst_if, mc);
1684 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1685 splx(s);
1686 }
1687
1688 next:
1689 BRIDGE_PSZ_RENTER(s);
1690 bridge_release_member(sc, bif, &psref);
1691
1692 /* Guarantee we don't re-enter the loop as we already
1693 * decided we're at the end. */
1694 if (used)
1695 break;
1696 }
1697 BRIDGE_PSZ_REXIT(s);
1698
1699 if (!used)
1700 m_freem(m);
1701
1702 m = n;
1703 } while (m != NULL);
1704 return 0;
1705
1706 unicast_asis:
1707 /*
1708 * XXX Spanning tree consideration here?
1709 */
1710 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1711 m_freem(m);
1712 else
1713 bridge_enqueue(sc, dst_if, m, 0);
1714 return 0;
1715 }
1716
1717 /*
1718 * bridge_start:
1719 *
1720 * Start output on a bridge.
1721 *
1722 * NOTE: This routine should never be called in this implementation.
1723 */
1724 static void
1725 bridge_start(struct ifnet *ifp)
1726 {
1727
1728 printf("%s: bridge_start() called\n", ifp->if_xname);
1729 }
1730
1731 /*
1732 * bridge_forward:
1733 *
1734 * The forwarding function of the bridge.
1735 */
1736 static void
1737 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1738 {
1739 struct bridge_iflist *bif;
1740 struct ifnet *src_if, *dst_if;
1741 struct ether_header *eh;
1742 struct psref psref;
1743 struct psref psref_src;
1744 DECLARE_LOCK_VARIABLE;
1745
1746 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1747 return;
1748
1749 src_if = m_get_rcvif_psref(m, &psref_src);
1750 if (src_if == NULL) {
1751 /* Interface is being destroyed? */
1752 m_freem(m);
1753 goto out;
1754 }
1755
1756 if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1757
1758 /*
1759 * Look up the bridge_iflist.
1760 */
1761 bif = bridge_lookup_member_if(sc, src_if, &psref);
1762 if (bif == NULL) {
1763 /* Interface is not a bridge member (anymore?) */
1764 m_freem(m);
1765 goto out;
1766 }
1767
1768 if (bif->bif_flags & IFBIF_STP) {
1769 switch (bif->bif_state) {
1770 case BSTP_IFSTATE_BLOCKING:
1771 case BSTP_IFSTATE_LISTENING:
1772 case BSTP_IFSTATE_DISABLED:
1773 m_freem(m);
1774 bridge_release_member(sc, bif, &psref);
1775 goto out;
1776 }
1777 }
1778
1779 eh = mtod(m, struct ether_header *);
1780
1781 /*
1782 * If the interface is learning, and the source
1783 * address is valid and not multicast, record
1784 * the address.
1785 */
1786 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1787 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1788 (eh->ether_shost[0] == 0 &&
1789 eh->ether_shost[1] == 0 &&
1790 eh->ether_shost[2] == 0 &&
1791 eh->ether_shost[3] == 0 &&
1792 eh->ether_shost[4] == 0 &&
1793 eh->ether_shost[5] == 0) == 0) {
1794 (void) bridge_rtupdate(sc, eh->ether_shost,
1795 src_if, 0, IFBAF_DYNAMIC);
1796 }
1797
1798 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1799 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1800 m_freem(m);
1801 bridge_release_member(sc, bif, &psref);
1802 goto out;
1803 }
1804
1805 bridge_release_member(sc, bif, &psref);
1806
1807 /*
1808 * At this point, the port either doesn't participate
1809 * in spanning tree or it is in the forwarding state.
1810 */
1811
1812 /*
1813 * If the packet is unicast, destined for someone on
1814 * "this" side of the bridge, drop it.
1815 */
1816 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1817 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1818 if (src_if == dst_if) {
1819 m_freem(m);
1820 goto out;
1821 }
1822 } else {
1823 /* ...forward it to all interfaces. */
1824 if_statinc(&sc->sc_if, if_imcasts);
1825 dst_if = NULL;
1826 }
1827
1828 if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1829 if (m != NULL)
1830 m_freem(m);
1831 goto out;
1832 }
1833 if (m == NULL)
1834 goto out;
1835
1836 if (dst_if == NULL) {
1837 bridge_broadcast(sc, src_if, m);
1838 goto out;
1839 }
1840
1841 m_put_rcvif_psref(src_if, &psref_src);
1842 src_if = NULL;
1843
1844 /*
1845 * At this point, we're dealing with a unicast frame
1846 * going to a different interface.
1847 */
1848 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1849 m_freem(m);
1850 goto out;
1851 }
1852
1853 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1854 if (bif == NULL) {
1855 /* Not a member of the bridge (anymore?) */
1856 m_freem(m);
1857 goto out;
1858 }
1859
1860 if (bif->bif_flags & IFBIF_STP) {
1861 switch (bif->bif_state) {
1862 case BSTP_IFSTATE_DISABLED:
1863 case BSTP_IFSTATE_BLOCKING:
1864 m_freem(m);
1865 bridge_release_member(sc, bif, &psref);
1866 goto out;
1867 }
1868 }
1869
1870 bridge_release_member(sc, bif, &psref);
1871
1872 /*
1873 * Before enqueueing this packet to the destination interface,
1874 * clear any in-bound checksum flags to prevent them from being
1875 * misused as out-bound flags.
1876 */
1877 m->m_pkthdr.csum_flags = 0;
1878
1879 ACQUIRE_GLOBAL_LOCKS();
1880 bridge_enqueue(sc, dst_if, m, 1);
1881 RELEASE_GLOBAL_LOCKS();
1882 out:
1883 if (src_if != NULL)
1884 m_put_rcvif_psref(src_if, &psref_src);
1885 return;
1886 }
1887
1888 static bool
1889 bstp_state_before_learning(struct bridge_iflist *bif)
1890 {
1891 if (bif->bif_flags & IFBIF_STP) {
1892 switch (bif->bif_state) {
1893 case BSTP_IFSTATE_BLOCKING:
1894 case BSTP_IFSTATE_LISTENING:
1895 case BSTP_IFSTATE_DISABLED:
1896 return true;
1897 }
1898 }
1899 return false;
1900 }
1901
1902 static bool
1903 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1904 {
1905 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1906
1907 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1908 #if NCARP > 0
1909 || (bif->bif_ifp->if_carp &&
1910 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1911 #endif /* NCARP > 0 */
1912 )
1913 return true;
1914
1915 return false;
1916 }
1917
1918 /*
1919 * bridge_input:
1920 *
1921 * Receive input from a member interface. Queue the packet for
1922 * bridging if it is not for us.
1923 */
1924 static void
1925 bridge_input(struct ifnet *ifp, struct mbuf *m)
1926 {
1927 struct bridge_softc *sc = ifp->if_bridge;
1928 struct bridge_iflist *bif;
1929 struct ether_header *eh;
1930 struct psref psref;
1931 int bound;
1932 DECLARE_LOCK_VARIABLE;
1933
1934 KASSERT(!cpu_intr_p());
1935
1936 if (__predict_false(sc == NULL) ||
1937 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1938 ACQUIRE_GLOBAL_LOCKS();
1939 ether_input(ifp, m);
1940 RELEASE_GLOBAL_LOCKS();
1941 return;
1942 }
1943
1944 bound = curlwp_bind();
1945 bif = bridge_lookup_member_if(sc, ifp, &psref);
1946 if (bif == NULL) {
1947 curlwp_bindx(bound);
1948 ACQUIRE_GLOBAL_LOCKS();
1949 ether_input(ifp, m);
1950 RELEASE_GLOBAL_LOCKS();
1951 return;
1952 }
1953
1954 eh = mtod(m, struct ether_header *);
1955
1956 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1957 if (memcmp(etherbroadcastaddr,
1958 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1959 m->m_flags |= M_BCAST;
1960 else
1961 m->m_flags |= M_MCAST;
1962 }
1963
1964 /*
1965 * A 'fast' path for packets addressed to interfaces that are
1966 * part of this bridge.
1967 */
1968 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1969 !bstp_state_before_learning(bif)) {
1970 struct bridge_iflist *_bif;
1971 struct ifnet *_ifp = NULL;
1972 int s;
1973 struct psref _psref;
1974
1975 BRIDGE_PSZ_RENTER(s);
1976 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
1977 /* It is destined for us. */
1978 if (bridge_ourether(_bif, eh, 0)) {
1979 bridge_acquire_member(sc, _bif, &_psref);
1980 BRIDGE_PSZ_REXIT(s);
1981 if (_bif->bif_flags & IFBIF_LEARNING)
1982 (void) bridge_rtupdate(sc,
1983 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1984 m_set_rcvif(m, _bif->bif_ifp);
1985 _ifp = _bif->bif_ifp;
1986 bridge_release_member(sc, _bif, &_psref);
1987 goto out;
1988 }
1989
1990 /* We just received a packet that we sent out. */
1991 if (bridge_ourether(_bif, eh, 1))
1992 break;
1993 }
1994 BRIDGE_PSZ_REXIT(s);
1995 out:
1996
1997 if (_bif != NULL) {
1998 bridge_release_member(sc, bif, &psref);
1999 curlwp_bindx(bound);
2000 if (_ifp != NULL) {
2001 m->m_flags &= ~M_PROMISC;
2002 ACQUIRE_GLOBAL_LOCKS();
2003 ether_input(_ifp, m);
2004 RELEASE_GLOBAL_LOCKS();
2005 } else
2006 m_freem(m);
2007 return;
2008 }
2009 }
2010
2011 /* Tap off 802.1D packets; they do not get forwarded. */
2012 if (bif->bif_flags & IFBIF_STP &&
2013 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2014 bstp_input(sc, bif, m);
2015 bridge_release_member(sc, bif, &psref);
2016 curlwp_bindx(bound);
2017 return;
2018 }
2019
2020 /*
2021 * A normal switch would discard the packet here, but that's not what
2022 * we've done historically. This also prevents some obnoxious behaviour.
2023 */
2024 if (bstp_state_before_learning(bif)) {
2025 bridge_release_member(sc, bif, &psref);
2026 curlwp_bindx(bound);
2027 ACQUIRE_GLOBAL_LOCKS();
2028 ether_input(ifp, m);
2029 RELEASE_GLOBAL_LOCKS();
2030 return;
2031 }
2032
2033 bridge_release_member(sc, bif, &psref);
2034
2035 bridge_forward(sc, m);
2036
2037 curlwp_bindx(bound);
2038 }
2039
2040 /*
2041 * bridge_broadcast:
2042 *
2043 * Send a frame to all interfaces that are members of
2044 * the bridge, except for the one on which the packet
2045 * arrived.
2046 */
2047 static void
2048 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2049 struct mbuf *m)
2050 {
2051 struct bridge_iflist *bif;
2052 struct mbuf *mc;
2053 struct ifnet *dst_if;
2054 bool bmcast;
2055 int s;
2056 DECLARE_LOCK_VARIABLE;
2057
2058 bmcast = m->m_flags & (M_BCAST|M_MCAST);
2059
2060 BRIDGE_PSZ_RENTER(s);
2061 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2062 struct psref psref;
2063
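		/*
		 * Take a psref on the member so that it stays valid after we
		 * leave the pserialize read section below; the copying and
		 * enqueueing done for this member must not happen inside the
		 * read section.
		 */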
2064 bridge_acquire_member(sc, bif, &psref);
2065 BRIDGE_PSZ_REXIT(s);
2066
2067 dst_if = bif->bif_ifp;
2068
2069 if (bif->bif_flags & IFBIF_STP) {
2070 switch (bif->bif_state) {
2071 case BSTP_IFSTATE_BLOCKING:
2072 case BSTP_IFSTATE_DISABLED:
2073 goto next;
2074 }
2075 }
2076
2077 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2078 goto next;
2079
2080 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2081 goto next;
2082
2083 if (dst_if != src_if) {
2084 mc = m_copypacket(m, M_DONTWAIT);
2085 if (mc == NULL) {
2086 if_statinc(&sc->sc_if, if_oerrors);
2087 goto next;
2088 }
2089 /*
2090 * Before enqueueing this packet to the destination
2091 * interface, clear any in-bound checksum flags to
2092 * prevent them from being misused as out-bound flags.
2093 */
2094 mc->m_pkthdr.csum_flags = 0;
2095
2096 ACQUIRE_GLOBAL_LOCKS();
2097 bridge_enqueue(sc, dst_if, mc, 1);
2098 RELEASE_GLOBAL_LOCKS();
2099 }
2100
2101 if (bmcast) {
2102 mc = m_copypacket(m, M_DONTWAIT);
2103 if (mc == NULL) {
2104 if_statinc(&sc->sc_if, if_oerrors);
2105 goto next;
2106 }
2107 /*
2108 * Before enqueueing this packet to the destination
2109 * interface, clear any in-bound checksum flags to
2110 * prevent them from being misused as out-bound flags.
2111 */
2112 mc->m_pkthdr.csum_flags = 0;
2113
2114 m_set_rcvif(mc, dst_if);
2115 mc->m_flags &= ~M_PROMISC;
2116
2117 ACQUIRE_GLOBAL_LOCKS();
2118 ether_input(dst_if, mc);
2119 RELEASE_GLOBAL_LOCKS();
2120 }
2121 next:
2122 BRIDGE_PSZ_RENTER(s);
2123 bridge_release_member(sc, bif, &psref);
2124 }
2125 BRIDGE_PSZ_REXIT(s);
2126
2127 m_freem(m);
2128 }
2129
2130 static int
2131 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2132 struct bridge_rtnode **brtp)
2133 {
2134 struct bridge_rtnode *brt;
2135 int error;
2136
2137 if (sc->sc_brtcnt >= sc->sc_brtmax)
2138 return ENOSPC;
2139
2140 /*
2141 * Allocate a new bridge forwarding node, and
2142 * initialize the expiration time and Ethernet
2143 * address.
2144 */
2145 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2146 if (brt == NULL)
2147 return ENOMEM;
2148
2149 memset(brt, 0, sizeof(*brt));
2150 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2151 brt->brt_flags = IFBAF_DYNAMIC;
2152 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2153 PSLIST_ENTRY_INIT(brt, brt_list);
2154 PSLIST_ENTRY_INIT(brt, brt_hash);
2155
2156 BRIDGE_RT_LOCK(sc);
2157 error = bridge_rtnode_insert(sc, brt);
2158 BRIDGE_RT_UNLOCK(sc);
2159
2160 if (error != 0) {
2161 pool_put(&bridge_rtnode_pool, brt);
2162 return error;
2163 }
2164
2165 *brtp = brt;
2166 return 0;
2167 }
2168
2169 /*
2170 * bridge_rtupdate:
2171 *
2172 * Add a bridge routing entry.
2173 */
2174 static int
2175 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2176 struct ifnet *dst_if, int setflags, uint8_t flags)
2177 {
2178 struct bridge_rtnode *brt;
2179 int s;
2180
2181 again:
2182 /*
2183 * A route for this destination might already exist. If so,
2184	 * update it; otherwise create a new one.
2185 */
2186 BRIDGE_RT_RENTER(s);
2187 brt = bridge_rtnode_lookup(sc, dst);
2188
2189 if (brt != NULL) {
2190 brt->brt_ifp = dst_if;
2191 if (setflags) {
2192 brt->brt_flags = flags;
2193 if (flags & IFBAF_STATIC)
2194 brt->brt_expire = 0;
2195 else
2196 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2197 } else {
2198 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2199 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2200 }
2201 }
2202 BRIDGE_RT_REXIT(s);
2203
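	/*
	 * No entry yet: allocate and insert one, then retry the lookup so
	 * that brt_ifp (and any requested flags) are set through the common
	 * update path above.
	 */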
2204 if (brt == NULL) {
2205 int r;
2206
2207 r = bridge_rtalloc(sc, dst, &brt);
2208 if (r != 0)
2209 return r;
2210 goto again;
2211 }
2212
2213 return 0;
2214 }
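
/*
 * Example (sketch, not compiled): a caller that wants a permanent
 * forwarding entry passes setflags = 1 and IFBAF_STATIC to
 * bridge_rtupdate(), which clears brt_expire so the entry never ages
 * out.  The function and address below are illustrative only.
 */
#if 0
static int
bridge_rtupdate_static_example(struct bridge_softc *sc,
    struct ifnet *member_ifp)
{
	/* Hypothetical unicast address used only for illustration. */
	static const uint8_t example_addr[ETHER_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	return bridge_rtupdate(sc, example_addr, member_ifp, 1, IFBAF_STATIC);
}
#endif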
2215
2216 /*
2217 * bridge_rtlookup:
2218 *
2219 * Lookup the destination interface for an address.
2220 */
2221 static struct ifnet *
2222 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2223 {
2224 struct bridge_rtnode *brt;
2225 struct ifnet *ifs = NULL;
2226 int s;
2227
2228 BRIDGE_RT_RENTER(s);
2229 brt = bridge_rtnode_lookup(sc, addr);
2230 if (brt != NULL)
2231 ifs = brt->brt_ifp;
2232 BRIDGE_RT_REXIT(s);
2233
2234 return ifs;
2235 }
2236
2237 typedef bool (*bridge_iterate_cb_t)
2238 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2239
2240 /*
2241 * bridge_rtlist_iterate_remove:
2242 *
2243  *	Iterate over sc->sc_rtlist and remove the rtnodes that the func
2244  *	callback marks for removal.  Removals are done in a pserialize-safe
2245  *	manner; to that end, all kmem_* operations are kept outside the
2246  *	mutex.
2247 */
2248 static void
2249 bridge_rtlist_iterate_remove(struct bridge_softc *sc,
    bridge_iterate_cb_t func, void *arg)
2250 {
2251 struct bridge_rtnode *brt;
2252 struct bridge_rtnode **brt_list;
2253 int i, count;
2254
2255 retry:
2256 count = sc->sc_brtcnt;
2257 if (count == 0)
2258 return;
2259 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2260
2261 BRIDGE_RT_LOCK(sc);
2262 if (__predict_false(sc->sc_brtcnt > count)) {
2263 		/* The number of rtnodes grew; retry with a larger array */
2264 BRIDGE_RT_UNLOCK(sc);
2265 kmem_free(brt_list, sizeof(*brt_list) * count);
2266 goto retry;
2267 }
2268
2269 i = 0;
2270 /*
2271 * We don't need to use a _SAFE variant here because we know
2272 * that a removed item keeps its next pointer as-is thanks to
2273 * pslist(9) and isn't freed in the loop.
2274 */
2275 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2276 bool need_break = false;
2277 if (func(sc, brt, &need_break, arg)) {
2278 bridge_rtnode_remove(sc, brt);
2279 brt_list[i++] = brt;
2280 }
2281 if (need_break)
2282 break;
2283 }
2284
2285 if (i > 0)
2286 BRIDGE_RT_PSZ_PERFORM(sc);
2287 BRIDGE_RT_UNLOCK(sc);
2288
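	/*
	 * The pserialize_perform above guarantees that no reader still
	 * references the removed nodes, so they can be destroyed without
	 * holding the lock.
	 */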
2289 while (--i >= 0)
2290 bridge_rtnode_destroy(brt_list[i]);
2291
2292 kmem_free(brt_list, sizeof(*brt_list) * count);
2293 }
2294
2295 static bool
2296 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2297 bool *need_break, void *arg)
2298 {
2299 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2300 		/* Take the subsequent removal into account */
2301 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2302 *need_break = true;
2303 return true;
2304 } else
2305 return false;
2306 }
2307
2308 static void
2309 bridge_rttrim0(struct bridge_softc *sc)
2310 {
2311 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2312 }
2313
2314 /*
2315 * bridge_rttrim:
2316 *
2317  *	Trim the routing table so that we have a number
2318 * of routing entries less than or equal to the
2319 * maximum number.
2320 */
2321 static void
2322 bridge_rttrim(struct bridge_softc *sc)
2323 {
2324
2325 /* Make sure we actually need to do this. */
2326 if (sc->sc_brtcnt <= sc->sc_brtmax)
2327 return;
2328
2329 /* Force an aging cycle; this might trim enough addresses. */
2330 bridge_rtage(sc);
2331 if (sc->sc_brtcnt <= sc->sc_brtmax)
2332 return;
2333
2334 bridge_rttrim0(sc);
2335
2336 return;
2337 }
2338
2339 /*
2340 * bridge_timer:
2341 *
2342 * Aging timer for the bridge.
2343 */
2344 static void
2345 bridge_timer(void *arg)
2346 {
2347 struct bridge_softc *sc = arg;
2348
2349 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2350 }
2351
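/*
 * The actual aging runs from a workqueue rather than directly from the
 * callout: bridge_rtage() ends up in bridge_rtlist_iterate_remove(),
 * which allocates with KM_SLEEP and performs pserialize, neither of
 * which may be done in the softint context a callout runs in.
 */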
2352 static void
2353 bridge_rtage_work(struct work *wk, void *arg)
2354 {
2355 struct bridge_softc *sc = arg;
2356
2357 KASSERT(wk == &sc->sc_rtage_wk);
2358
2359 bridge_rtage(sc);
2360
2361 if (sc->sc_if.if_flags & IFF_RUNNING)
2362 callout_reset(&sc->sc_brcallout,
2363 bridge_rtable_prune_period * hz, bridge_timer, sc);
2364 }
2365
2366 static bool
2367 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2368 bool *need_break, void *arg)
2369 {
2370 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2371 time_uptime >= brt->brt_expire)
2372 return true;
2373 else
2374 return false;
2375 }
2376
2377 /*
2378 * bridge_rtage:
2379 *
2380 * Perform an aging cycle.
2381 */
2382 static void
2383 bridge_rtage(struct bridge_softc *sc)
2384 {
2385 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2386 }
2387
2388
2389 static bool
2390 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2391 bool *need_break, void *arg)
2392 {
2393 int full = *(int*)arg;
2394
2395 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2396 return true;
2397 else
2398 return false;
2399 }
2400
2401 /*
2402 * bridge_rtflush:
2403 *
2404 * Remove all dynamic addresses from the bridge.
2405 */
2406 static void
2407 bridge_rtflush(struct bridge_softc *sc, int full)
2408 {
2409 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2410 }
2411
2412 /*
2413 * bridge_rtdaddr:
2414 *
2415 * Remove an address from the table.
2416 */
2417 static int
2418 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2419 {
2420 struct bridge_rtnode *brt;
2421
2422 BRIDGE_RT_LOCK(sc);
2423 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2424 BRIDGE_RT_UNLOCK(sc);
2425 return ENOENT;
2426 }
2427 bridge_rtnode_remove(sc, brt);
2428 BRIDGE_RT_PSZ_PERFORM(sc);
2429 BRIDGE_RT_UNLOCK(sc);
2430
2431 bridge_rtnode_destroy(brt);
2432
2433 return 0;
2434 }
2435
2436 /*
2437 * bridge_rtdelete:
2438 *
2439  *	Delete routes to a specific member interface.
2440 */
2441 static void
2442 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2443 {
2444 struct bridge_rtnode *brt;
2445
2446 /* XXX pserialize_perform for each entry is slow */
2447 again:
2448 BRIDGE_RT_LOCK(sc);
2449 BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2450 if (brt->brt_ifp == ifp)
2451 break;
2452 }
2453 if (brt == NULL) {
2454 BRIDGE_RT_UNLOCK(sc);
2455 return;
2456 }
2457 bridge_rtnode_remove(sc, brt);
2458 BRIDGE_RT_PSZ_PERFORM(sc);
2459 BRIDGE_RT_UNLOCK(sc);
2460
2461 bridge_rtnode_destroy(brt);
2462
2463 goto again;
2464 }
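
/*
 * Sketch (not compiled): the per-interface deletion above could also be
 * written as a bridge_rtlist_iterate_remove() callback, batching the
 * pserialize_perform over all matching entries instead of performing it
 * once per entry; the callback name is illustrative only.
 */
#if 0
static bool
bridge_rtdelete_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
    bool *need_break, void *arg)
{
	struct ifnet *ifp = arg;

	return brt->brt_ifp == ifp;
}

/* ... and bridge_rtdelete() would reduce to: */
/* bridge_rtlist_iterate_remove(sc, bridge_rtdelete_cb, ifp); */
#endif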
2465
2466 /*
2467 * bridge_rtable_init:
2468 *
2469 * Initialize the route table for this bridge.
2470 */
2471 static void
2472 bridge_rtable_init(struct bridge_softc *sc)
2473 {
2474 int i;
2475
2476 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2477 KM_SLEEP);
2478
2479 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2480 PSLIST_INIT(&sc->sc_rthash[i]);
2481
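	/*
	 * Seed the hash with a random key so the bucket an address maps to
	 * cannot be predicted from outside (see bridge_rthash()).
	 */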
2482 sc->sc_rthash_key = cprng_fast32();
2483
2484 PSLIST_INIT(&sc->sc_rtlist);
2485
2486 sc->sc_rtlist_psz = pserialize_create();
2487 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2488 }
2489
2490 /*
2491 * bridge_rtable_fini:
2492 *
2493 * Deconstruct the route table for this bridge.
2494 */
2495 static void
2496 bridge_rtable_fini(struct bridge_softc *sc)
2497 {
2498
2499 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2500 mutex_obj_free(sc->sc_rtlist_lock);
2501 pserialize_destroy(sc->sc_rtlist_psz);
2502 }
2503
2504 /*
2505 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2506  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2507 */
2508 #define mix(a, b, c) \
2509 do { \
2510 a -= b; a -= c; a ^= (c >> 13); \
2511 b -= c; b -= a; b ^= (a << 8); \
2512 c -= a; c -= b; c ^= (b >> 13); \
2513 a -= b; a -= c; a ^= (c >> 12); \
2514 b -= c; b -= a; b ^= (a << 16); \
2515 c -= a; c -= b; c ^= (b >> 5); \
2516 a -= b; a -= c; a ^= (c >> 3); \
2517 b -= c; b -= a; b ^= (a << 10); \
2518 c -= a; c -= b; c ^= (b >> 15); \
2519 } while (/*CONSTCOND*/0)
2520
2521 static inline uint32_t
2522 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2523 {
2524 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2525
2526 b += addr[5] << 8;
2527 b += addr[4];
2528 a += (uint32_t)addr[3] << 24;
2529 a += addr[2] << 16;
2530 a += addr[1] << 8;
2531 a += addr[0];
2532
2533 mix(a, b, c);
2534
2535 return (c & BRIDGE_RTHASH_MASK);
2536 }
2537
2538 #undef mix
2539
2540 /*
2541 * bridge_rtnode_lookup:
2542 *
2543 * Look up a bridge route node for the specified destination.
2544 */
2545 static struct bridge_rtnode *
2546 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2547 {
2548 struct bridge_rtnode *brt;
2549 uint32_t hash;
2550 int dir;
2551
2552 hash = bridge_rthash(sc, addr);
2553 BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2554 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2555 if (dir == 0)
2556 return brt;
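		/*
		 * The chain is kept sorted in descending address order by
		 * bridge_rtnode_insert(), so once we see an entry smaller
		 * than addr it cannot be present.
		 */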
2557 if (dir > 0)
2558 return NULL;
2559 }
2560
2561 return NULL;
2562 }
2563
2564 /*
2565 * bridge_rtnode_insert:
2566 *
2567  *	Insert the specified bridge node into the route table, keeping the
2568  *	hash chain sorted; return EEXIST if the entry is already present.
2569 */
2570 static int
2571 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2572 {
2573 struct bridge_rtnode *lbrt, *prev = NULL;
2574 uint32_t hash;
2575
2576 KASSERT(BRIDGE_RT_LOCKED(sc));
2577
2578 hash = bridge_rthash(sc, brt->brt_addr);
2579 BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2580 int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2581 if (dir == 0)
2582 return EEXIST;
2583 if (dir > 0)
2584 break;
2585 prev = lbrt;
2586 }
2587 if (prev == NULL)
2588 BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2589 else
2590 BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2591
2592 BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2593 sc->sc_brtcnt++;
2594
2595 return 0;
2596 }
2597
2598 /*
2599 * bridge_rtnode_remove:
2600 *
2601 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2602 */
2603 static void
2604 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2605 {
2606
2607 KASSERT(BRIDGE_RT_LOCKED(sc));
2608
2609 BRIDGE_RTHASH_WRITER_REMOVE(brt);
2610 BRIDGE_RTLIST_WRITER_REMOVE(brt);
2611 sc->sc_brtcnt--;
2612 }
2613
2614 /*
2615 * bridge_rtnode_destroy:
2616 *
2617 * Destroy a bridge rtnode.
2618 */
2619 static void
2620 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2621 {
2622
2623 PSLIST_ENTRY_DESTROY(brt, brt_list);
2624 PSLIST_ENTRY_DESTROY(brt, brt_hash);
2625 pool_put(&bridge_rtnode_pool, brt);
2626 }
2627
2628 #if defined(BRIDGE_IPF)
2629 extern pfil_head_t *inet_pfil_hook; /* XXX */
2630 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2631
2632 /*
2633  * Run bridge packets through IPF if they are one of the types IPF can deal
2634  * with; ARP and REVARP are passed untouched, since IPF would pass them
2635  * without question anyway.
2636 */
2637 static int
2638 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2639 {
2640 int snap, error;
2641 struct ether_header *eh1, eh2;
2642 struct llc llc1;
2643 uint16_t ether_type;
2644
2645 snap = 0;
2646 	error = -1;	/* Default to an error unless explicitly cleared below */
2647 eh1 = mtod(*mp, struct ether_header *);
2648 ether_type = ntohs(eh1->ether_type);
2649
2650 /*
2651 * Check for SNAP/LLC.
2652 */
2653 if (ether_type < ETHERMTU) {
2654 struct llc *llc2 = (struct llc *)(eh1 + 1);
2655
2656 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2657 llc2->llc_dsap == LLC_SNAP_LSAP &&
2658 llc2->llc_ssap == LLC_SNAP_LSAP &&
2659 llc2->llc_control == LLC_UI) {
2660 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2661 snap = 1;
2662 }
2663 }
2664
2665 /*
2666 * If we're trying to filter bridge traffic, don't look at anything
2667 * other than IP and ARP traffic. If the filter doesn't understand
2668 * IPv6, don't allow IPv6 through the bridge either. This is lame
2669 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2670 * but of course we don't have an AppleTalk filter to begin with.
2671 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2672 * ARP traffic.)
2673 */
2674 switch (ether_type) {
2675 case ETHERTYPE_ARP:
2676 case ETHERTYPE_REVARP:
2677 return 0; /* Automatically pass */
2678 case ETHERTYPE_IP:
2679 # ifdef INET6
2680 case ETHERTYPE_IPV6:
2681 # endif /* INET6 */
2682 break;
2683 default:
2684 goto bad;
2685 }
2686
2687 /* Strip off the Ethernet header and keep a copy. */
2688 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2689 m_adj(*mp, ETHER_HDR_LEN);
2690
2691 /* Strip off snap header, if present */
2692 if (snap) {
2693 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2694 m_adj(*mp, sizeof(struct llc));
2695 }
2696
2697 /*
2698 	 * Check basic packet sanity and pass the packet to IPF via the pfil hooks.
2699 */
2700 KASSERT(!cpu_intr_p());
2701 switch (ether_type)
2702 {
2703 case ETHERTYPE_IP :
2704 error = bridge_ip_checkbasic(mp);
2705 if (error == 0)
2706 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2707 break;
2708 # ifdef INET6
2709 case ETHERTYPE_IPV6 :
2710 error = bridge_ip6_checkbasic(mp);
2711 if (error == 0)
2712 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2713 break;
2714 # endif
2715 default :
2716 error = 0;
2717 break;
2718 }
2719
2720 if (*mp == NULL)
2721 return error;
2722 if (error != 0)
2723 goto bad;
2724
2725 error = -1;
2726
2727 /*
2728 * Finally, put everything back the way it was and return
2729 */
2730 if (snap) {
2731 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2732 if (*mp == NULL)
2733 return error;
2734 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2735 }
2736
2737 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2738 if (*mp == NULL)
2739 return error;
2740 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2741
2742 return 0;
2743
2744 bad:
2745 m_freem(*mp);
2746 *mp = NULL;
2747 return error;
2748 }
2749
2750 /*
2751  * Perform basic checks on the IP header size, since
2752  * IPF assumes ip_input has already validated it on
2753  * its behalf.  Cut-and-pasted from ip_input.c.
2754 * Given how simple the IPv6 version is,
2755 * does the IPv4 version really need to be
2756 * this complicated?
2757 *
2758 * XXX Should we update ipstat here, or not?
2759 * XXX Right now we update ipstat but not
2760 * XXX csum_counter.
2761 */
2762 static int
2763 bridge_ip_checkbasic(struct mbuf **mp)
2764 {
2765 struct mbuf *m = *mp;
2766 struct ip *ip;
2767 int len, hlen;
2768
2769 if (*mp == NULL)
2770 return -1;
2771
2772 if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2773 if ((m = m_copyup(m, sizeof(struct ip),
2774 (max_linkhdr + 3) & ~3)) == NULL) {
2775 /* XXXJRT new stat, please */
2776 ip_statinc(IP_STAT_TOOSMALL);
2777 goto bad;
2778 }
2779 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
2780 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2781 ip_statinc(IP_STAT_TOOSMALL);
2782 goto bad;
2783 }
2784 }
2785 ip = mtod(m, struct ip *);
2786 if (ip == NULL) goto bad;
2787
2788 if (ip->ip_v != IPVERSION) {
2789 ip_statinc(IP_STAT_BADVERS);
2790 goto bad;
2791 }
2792 hlen = ip->ip_hl << 2;
2793 if (hlen < sizeof(struct ip)) { /* minimum header length */
2794 ip_statinc(IP_STAT_BADHLEN);
2795 goto bad;
2796 }
2797 if (hlen > m->m_len) {
2798 if ((m = m_pullup(m, hlen)) == 0) {
2799 ip_statinc(IP_STAT_BADHLEN);
2800 goto bad;
2801 }
2802 ip = mtod(m, struct ip *);
2803 if (ip == NULL) goto bad;
2804 }
2805
2806 switch (m->m_pkthdr.csum_flags &
2807 ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2808 M_CSUM_IPv4_BAD)) {
2809 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2810 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2811 goto bad;
2812
2813 case M_CSUM_IPv4:
2814 /* Checksum was okay. */
2815 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2816 break;
2817
2818 default:
2819 /* Must compute it ourselves. */
2820 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2821 if (in_cksum(m, hlen) != 0)
2822 goto bad;
2823 break;
2824 }
2825
2826 /* Retrieve the packet length. */
2827 len = ntohs(ip->ip_len);
2828
2829 /*
2830 * Check for additional length bogosity
2831 */
2832 if (len < hlen) {
2833 ip_statinc(IP_STAT_BADLEN);
2834 goto bad;
2835 }
2836
2837 /*
2838 * Check that the amount of data in the buffers
2839 	 * is at least as much as the IP header would have us expect.
2840 * Drop packet if shorter than we expect.
2841 */
2842 if (m->m_pkthdr.len < len) {
2843 ip_statinc(IP_STAT_TOOSHORT);
2844 goto bad;
2845 }
2846
2847 /* Checks out, proceed */
2848 *mp = m;
2849 return 0;
2850
2851 bad:
2852 *mp = m;
2853 return -1;
2854 }
2855
2856 # ifdef INET6
2857 /*
2858 * Same as above, but for IPv6.
2859 * Cut-and-pasted from ip6_input.c.
2860 * XXX Should we update ip6stat, or not?
2861 */
2862 static int
2863 bridge_ip6_checkbasic(struct mbuf **mp)
2864 {
2865 struct mbuf *m = *mp;
2866 struct ip6_hdr *ip6;
2867
2868 /*
2869 * If the IPv6 header is not aligned, slurp it up into a new
2870 * mbuf with space for link headers, in the event we forward
2871 * it. Otherwise, if it is aligned, make sure the entire base
2872 * IPv6 header is in the first mbuf of the chain.
2873 */
2874 if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2875 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2876 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2877 (max_linkhdr + 3) & ~3)) == NULL) {
2878 /* XXXJRT new stat, please */
2879 ip6_statinc(IP6_STAT_TOOSMALL);
2880 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2881 goto bad;
2882 }
2883 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2884 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2885 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2886 ip6_statinc(IP6_STAT_TOOSMALL);
2887 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2888 goto bad;
2889 }
2890 }
2891
2892 ip6 = mtod(m, struct ip6_hdr *);
2893
2894 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2895 ip6_statinc(IP6_STAT_BADVERS);
2896 in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2897 goto bad;
2898 }
2899
2900 /* Checks out, proceed */
2901 *mp = m;
2902 return 0;
2903
2904 bad:
2905 *mp = m;
2906 return -1;
2907 }
2908 # endif /* INET6 */
2909 #endif /* BRIDGE_IPF */
2910