1 /*	$NetBSD: if_bridge.c,v 1.134.6.7 2018/02/26 00:41:13 snj Exp $	*/
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77  *    802.11, VLANs on Ethernet, etc.).  Figure out a nice way
78  *    to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *    consider heterogeneous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.134.6.7 2018/02/26 00:41:13 snj Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #include "opt_net_mpsafe.h"
89 #endif /* _KERNEL_OPT */
90
91 #include <sys/param.h>
92 #include <sys/kernel.h>
93 #include <sys/mbuf.h>
94 #include <sys/queue.h>
95 #include <sys/socket.h>
96 #include <sys/socketvar.h> /* for softnet_lock */
97 #include <sys/sockio.h>
98 #include <sys/systm.h>
99 #include <sys/proc.h>
100 #include <sys/pool.h>
101 #include <sys/kauth.h>
102 #include <sys/cpu.h>
103 #include <sys/cprng.h>
104 #include <sys/mutex.h>
105 #include <sys/kmem.h>
106
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115
116 #if defined(BRIDGE_IPF)
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123
124 #include <netinet/ip6.h>
125 #include <netinet6/in6_var.h>
126 #include <netinet6/ip6_var.h>
127 #include <netinet6/ip6_private.h> /* XXX */
128 #endif /* BRIDGE_IPF */
129
130 /*
131 * Size of the route hash table. Must be a power of two.
132 */
133 #ifndef BRIDGE_RTHASH_SIZE
134 #define BRIDGE_RTHASH_SIZE 1024
135 #endif
136
137 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
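/*
 * Because the table size is a power of two, a hash value can be reduced
 * into the table with "hash & BRIDGE_RTHASH_MASK" rather than a modulo
 * operation.
 */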
138
139 #include "carp.h"
140 #if NCARP > 0
141 #include <netinet/in.h>
142 #include <netinet/in_var.h>
143 #include <netinet/ip_carp.h>
144 #endif
145
146 #include "ioconf.h"
147
148 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
149 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
150 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
151
152 /*
153 * Maximum number of addresses to cache.
154 */
155 #ifndef BRIDGE_RTABLE_MAX
156 #define BRIDGE_RTABLE_MAX 100
157 #endif
158
159 /*
160  * Spanning tree defaults.  Times are kept in units of 1/256 second.
161  */
162 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
163 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
164 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
165 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
166 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
167 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
168 #define BSTP_DEFAULT_PATH_COST 55
169
170 /*
171 * Timeout (in seconds) for entries learned dynamically.
172 */
173 #ifndef BRIDGE_RTABLE_TIMEOUT
174 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
175 #endif
176
177 /*
178 * Number of seconds between walks of the route list.
179 */
180 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
181 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
182 #endif
183
184 #define BRIDGE_RT_LOCK(_sc) if ((_sc)->sc_rtlist_lock) \
185 mutex_enter((_sc)->sc_rtlist_lock)
186 #define BRIDGE_RT_UNLOCK(_sc) if ((_sc)->sc_rtlist_lock) \
187 mutex_exit((_sc)->sc_rtlist_lock)
188 #define BRIDGE_RT_LOCKED(_sc) (!(_sc)->sc_rtlist_lock || \
189 mutex_owned((_sc)->sc_rtlist_lock))
190
191 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
192 if ((_sc)->sc_rtlist_psz != NULL) \
193 pserialize_perform((_sc)->sc_rtlist_psz);
194
195 #define BRIDGE_RT_RENTER(__s) do { __s = pserialize_read_enter(); } while (0)
196 #define BRIDGE_RT_REXIT(__s) do { pserialize_read_exit(__s); } while (0)
197
198
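/*
 * When the kernel is not built with NET_MPSAFE, the bridge must enter the
 * traditional global locks (KERNEL_LOCK, softnet_lock and splsoftnet())
 * around calls into the rest of the network stack; with NET_MPSAFE these
 * macros compile to nothing.
 */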
199 #ifdef NET_MPSAFE
200 #define DECLARE_LOCK_VARIABLE
201 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
202 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
203 #else
204 #define DECLARE_LOCK_VARIABLE int __s
205 #define ACQUIRE_GLOBAL_LOCKS() do { \
206 KERNEL_LOCK(1, NULL); \
207 mutex_enter(softnet_lock); \
208 __s = splsoftnet(); \
209 } while (0)
210 #define RELEASE_GLOBAL_LOCKS() do { \
211 splx(__s); \
212 mutex_exit(softnet_lock); \
213 KERNEL_UNLOCK_ONE(NULL); \
214 } while (0)
215 #endif
216
217 struct psref_class *bridge_psref_class __read_mostly;
218
219 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
220
221 static struct pool bridge_rtnode_pool;
222
223 static int bridge_clone_create(struct if_clone *, int);
224 static int bridge_clone_destroy(struct ifnet *);
225
226 static int bridge_ioctl(struct ifnet *, u_long, void *);
227 static int bridge_init(struct ifnet *);
228 static void bridge_stop(struct ifnet *, int);
229 static void bridge_start(struct ifnet *);
230
231 static void bridge_input(struct ifnet *, struct mbuf *);
232 static void bridge_forward(struct bridge_softc *, struct mbuf *);
233
234 static void bridge_timer(void *);
235
236 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
237 struct mbuf *);
238
239 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
240 struct ifnet *, int, uint8_t);
241 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
242 static void bridge_rttrim(struct bridge_softc *);
243 static void bridge_rtage(struct bridge_softc *);
244 static void bridge_rtage_work(struct work *, void *);
245 static void bridge_rtflush(struct bridge_softc *, int);
246 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
247 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
248
249 static void bridge_rtable_init(struct bridge_softc *);
250 static void bridge_rtable_fini(struct bridge_softc *);
251
252 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
253 const uint8_t *);
254 static int bridge_rtnode_insert(struct bridge_softc *,
255 struct bridge_rtnode *);
256 static void bridge_rtnode_remove(struct bridge_softc *,
257 struct bridge_rtnode *);
258 static void bridge_rtnode_destroy(struct bridge_rtnode *);
259
260 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
261 const char *name,
262 struct psref *);
263 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
264 struct ifnet *ifp,
265 struct psref *);
266 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
267 struct psref *);
268 static void bridge_delete_member(struct bridge_softc *,
269 struct bridge_iflist *);
270 static void bridge_acquire_member(struct bridge_softc *sc,
271 struct bridge_iflist *,
272 struct psref *);
273
274 static int bridge_ioctl_add(struct bridge_softc *, void *);
275 static int bridge_ioctl_del(struct bridge_softc *, void *);
276 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
277 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
278 static int bridge_ioctl_scache(struct bridge_softc *, void *);
279 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
280 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
281 static int bridge_ioctl_rts(struct bridge_softc *, void *);
282 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
283 static int bridge_ioctl_sto(struct bridge_softc *, void *);
284 static int bridge_ioctl_gto(struct bridge_softc *, void *);
285 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
286 static int bridge_ioctl_flush(struct bridge_softc *, void *);
287 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
288 static int bridge_ioctl_spri(struct bridge_softc *, void *);
289 static int bridge_ioctl_ght(struct bridge_softc *, void *);
290 static int bridge_ioctl_sht(struct bridge_softc *, void *);
291 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
292 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
293 static int bridge_ioctl_gma(struct bridge_softc *, void *);
294 static int bridge_ioctl_sma(struct bridge_softc *, void *);
295 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
296 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
297 #if defined(BRIDGE_IPF)
298 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
299 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
300 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
301 static int bridge_ip_checkbasic(struct mbuf **mp);
302 # ifdef INET6
303 static int bridge_ip6_checkbasic(struct mbuf **mp);
304 # endif /* INET6 */
305 #endif /* BRIDGE_IPF */
306
307 struct bridge_control {
308 int (*bc_func)(struct bridge_softc *, void *);
309 int bc_argsize;
310 int bc_flags;
311 };
312
313 #define BC_F_COPYIN 0x01 /* copy arguments in */
314 #define BC_F_COPYOUT 0x02 /* copy arguments out */
315 #define BC_F_SUSER 0x04 /* do super-user check */
316 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
317 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
318
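/*
 * The table below is indexed by the ifd_cmd value of a SIOCGDRVSPEC or
 * SIOCSDRVSPEC request issued on the bridge interface.  As a rough,
 * hypothetical userland sketch (interface names invented, error and
 * privilege handling omitted, s is an ordinary AF_INET datagram socket),
 * adding a member via BRDGADD looks like:
 *
 *	struct ifbreq req = { 0 };
 *	struct ifdrv ifd = { 0 };
 *
 *	strlcpy(req.ifbr_ifsname, "wm0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 */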
319 static const struct bridge_control bridge_control_table[] = {
320 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
321 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
322
323 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
324 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
325
326 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
327 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
328
329 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
330 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
331
332 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
333
334 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
335 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
336
337 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
338
339 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
340
341 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
342 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343
344 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
345 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
346
347 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
348 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
349
350 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
351 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
352
353 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
354
355 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
356 #if defined(BRIDGE_IPF)
357 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
358 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
359 #endif /* BRIDGE_IPF */
360 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
361 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
362 };
363
364 static const int bridge_control_table_size = __arraycount(bridge_control_table);
365
366 static struct if_clone bridge_cloner =
367 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
368
369 /*
370 * bridgeattach:
371 *
372 * Pseudo-device attach routine.
373 */
374 void
375 bridgeattach(int n)
376 {
377
378 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
379 0, 0, 0, "brtpl", NULL, IPL_NET);
380
381 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
382
383 if_clone_attach(&bridge_cloner);
384 }
385
386 /*
387 * bridge_clone_create:
388 *
389 * Create a new bridge instance.
390 */
391 static int
392 bridge_clone_create(struct if_clone *ifc, int unit)
393 {
394 struct bridge_softc *sc;
395 struct ifnet *ifp;
396 int error;
397
398 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
399 ifp = &sc->sc_if;
400
401 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
402 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
403 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
404 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
405 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
406 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
407 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
408 sc->sc_filter_flags = 0;
409
410 /* Initialize our routing table. */
411 bridge_rtable_init(sc);
412
413 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
414 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
415 if (error)
416 panic("%s: workqueue_create %d\n", __func__, error);
417
418 callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
419 callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
420
421 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
422 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
423 sc->sc_iflist_psref.bip_psz = pserialize_create();
424
425 if_initname(ifp, ifc->ifc_name, unit);
426 ifp->if_softc = sc;
427 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
428 #ifdef NET_MPSAFE
429 ifp->if_extflags |= IFEF_MPSAFE;
430 #endif
431 ifp->if_mtu = ETHERMTU;
432 ifp->if_ioctl = bridge_ioctl;
433 ifp->if_output = bridge_output;
434 ifp->if_start = bridge_start;
435 ifp->if_stop = bridge_stop;
436 ifp->if_init = bridge_init;
437 ifp->if_type = IFT_BRIDGE;
438 ifp->if_addrlen = 0;
439 ifp->if_dlt = DLT_EN10MB;
440 ifp->if_hdrlen = ETHER_HDR_LEN;
441
442 error = if_initialize(ifp);
443 if (error != 0) {
444 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
445 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
446 callout_destroy(&sc->sc_brcallout);
447 callout_destroy(&sc->sc_bstpcallout);
448 workqueue_destroy(sc->sc_rtage_wq);
449 bridge_rtable_fini(sc);
450 kmem_free(sc, sizeof(*sc));
451
452 return error;
453 }
454 if_register(ifp);
455
456 if_alloc_sadl(ifp);
457
458 return 0;
459 }
460
461 /*
462 * bridge_clone_destroy:
463 *
464 * Destroy a bridge instance.
465 */
466 static int
467 bridge_clone_destroy(struct ifnet *ifp)
468 {
469 struct bridge_softc *sc = ifp->if_softc;
470 struct bridge_iflist *bif;
471
472 if ((ifp->if_flags & IFF_RUNNING) != 0)
473 bridge_stop(ifp, 1);
474
475 BRIDGE_LOCK(sc);
476 for (;;) {
477 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
478 bif_next);
479 if (bif == NULL)
480 break;
481 bridge_delete_member(sc, bif);
482 }
483 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
484 BRIDGE_UNLOCK(sc);
485
486 if_detach(ifp);
487
488 /* Tear down the routing table. */
489 bridge_rtable_fini(sc);
490
491 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
492 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
493 callout_destroy(&sc->sc_brcallout);
494 callout_destroy(&sc->sc_bstpcallout);
495 workqueue_destroy(sc->sc_rtage_wq);
496 kmem_free(sc, sizeof(*sc));
497
498 return 0;
499 }
500
501 /*
502 * bridge_ioctl:
503 *
504 * Handle a control request from the operator.
505 */
506 static int
507 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
508 {
509 struct bridge_softc *sc = ifp->if_softc;
510 struct lwp *l = curlwp; /* XXX */
511 union {
512 struct ifbreq ifbreq;
513 struct ifbifconf ifbifconf;
514 struct ifbareq ifbareq;
515 struct ifbaconf ifbaconf;
516 struct ifbrparam ifbrparam;
517 } args;
518 struct ifdrv *ifd = (struct ifdrv *) data;
519 const struct bridge_control *bc = NULL; /* XXXGCC */
520 int s, error = 0;
521
522 /* Authorize command before calling splsoftnet(). */
523 switch (cmd) {
524 case SIOCGDRVSPEC:
525 case SIOCSDRVSPEC:
526 if (ifd->ifd_cmd >= bridge_control_table_size
527 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
528 error = EINVAL;
529 return error;
530 }
531
532 /* We only care about BC_F_SUSER at this point. */
533 if ((bc->bc_flags & BC_F_SUSER) == 0)
534 break;
535
536 error = kauth_authorize_network(l->l_cred,
537 KAUTH_NETWORK_INTERFACE_BRIDGE,
538 cmd == SIOCGDRVSPEC ?
539 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
540 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
541 ifd, NULL, NULL);
542 if (error)
543 return error;
544
545 break;
546 }
547
548 s = splsoftnet();
549
550 switch (cmd) {
551 case SIOCGDRVSPEC:
552 case SIOCSDRVSPEC:
553 KASSERT(bc != NULL);
554 if (cmd == SIOCGDRVSPEC &&
555 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
556 error = EINVAL;
557 break;
558 }
559 else if (cmd == SIOCSDRVSPEC &&
560 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
561 error = EINVAL;
562 break;
563 }
564
565 /* BC_F_SUSER is checked above, before splsoftnet(). */
566
567 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
568 && (ifd->ifd_len != bc->bc_argsize
569 || ifd->ifd_len > sizeof(args))) {
570 error = EINVAL;
571 break;
572 }
573
574 memset(&args, 0, sizeof(args));
575 if (bc->bc_flags & BC_F_COPYIN) {
576 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
577 if (error)
578 break;
579 } else if (bc->bc_flags & BC_F_XLATEIN) {
580 args.ifbifconf.ifbic_len = ifd->ifd_len;
581 args.ifbifconf.ifbic_buf = ifd->ifd_data;
582 }
583
584 error = (*bc->bc_func)(sc, &args);
585 if (error)
586 break;
587
588 if (bc->bc_flags & BC_F_COPYOUT) {
589 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
590 } else if (bc->bc_flags & BC_F_XLATEOUT) {
591 ifd->ifd_len = args.ifbifconf.ifbic_len;
592 ifd->ifd_data = args.ifbifconf.ifbic_buf;
593 }
594 break;
595
596 case SIOCSIFFLAGS:
597 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
598 break;
599 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
600 case IFF_RUNNING:
601 /*
602 * If interface is marked down and it is running,
603 * then stop and disable it.
604 */
605 (*ifp->if_stop)(ifp, 1);
606 break;
607 case IFF_UP:
608 /*
609 * If interface is marked up and it is stopped, then
610 * start it.
611 */
612 error = (*ifp->if_init)(ifp);
613 break;
614 default:
615 break;
616 }
617 break;
618
619 case SIOCSIFMTU:
620 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
621 error = 0;
622 break;
623
624 default:
625 error = ifioctl_common(ifp, cmd, data);
626 break;
627 }
628
629 splx(s);
630
631 return error;
632 }
633
634 /*
635 * bridge_lookup_member:
636 *
637 * Lookup a bridge member interface.
638 */
639 static struct bridge_iflist *
640 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
641 {
642 struct bridge_iflist *bif;
643 struct ifnet *ifp;
644 int s;
645
646 BRIDGE_PSZ_RENTER(s);
647
648 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
649 ifp = bif->bif_ifp;
650 if (strcmp(ifp->if_xname, name) == 0)
651 break;
652 }
653 if (bif != NULL)
654 bridge_acquire_member(sc, bif, psref);
655
656 BRIDGE_PSZ_REXIT(s);
657
658 return bif;
659 }
660
661 /*
662 * bridge_lookup_member_if:
663 *
664 * Lookup a bridge member interface by ifnet*.
665 */
666 static struct bridge_iflist *
667 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
668 struct psref *psref)
669 {
670 struct bridge_iflist *bif;
671 int s;
672
673 BRIDGE_PSZ_RENTER(s);
674
675 bif = member_ifp->if_bridgeif;
676 if (bif != NULL) {
677 psref_acquire(psref, &bif->bif_psref,
678 bridge_psref_class);
679 }
680
681 BRIDGE_PSZ_REXIT(s);
682
683 return bif;
684 }
685
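/*
 * bridge_acquire_member:
 *
 *	Acquire a passive reference to the specified member interface.
 */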
686 static void
687 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
688 struct psref *psref)
689 {
690
691 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
692 }
693
694 /*
695 * bridge_release_member:
696 *
697 * Release the specified member interface.
698 */
699 static void
700 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
701 struct psref *psref)
702 {
703
704 psref_release(psref, &bif->bif_psref, bridge_psref_class);
705 }
706
707 /*
708 * bridge_delete_member:
709 *
710 * Delete the specified member interface.
711 */
712 static void
713 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
714 {
715 struct ifnet *ifs = bif->bif_ifp;
716
717 KASSERT(BRIDGE_LOCKED(sc));
718
719 ifs->_if_input = ether_input;
720 ifs->if_bridge = NULL;
721 ifs->if_bridgeif = NULL;
722
723 PSLIST_WRITER_REMOVE(bif, bif_next);
724 BRIDGE_PSZ_PERFORM(sc);
725 BRIDGE_UNLOCK(sc);
726
727 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
728
729 PSLIST_ENTRY_DESTROY(bif, bif_next);
730 kmem_free(bif, sizeof(*bif));
731
732 BRIDGE_LOCK(sc);
733 }
734
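/*
 * bridge_ioctl_add:
 *
 *	BRDGADD handler: add an interface as a member of the bridge and
 *	place it into promiscuous mode.
 */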
735 static int
736 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
737 {
738 struct ifbreq *req = arg;
739 struct bridge_iflist *bif = NULL;
740 struct ifnet *ifs;
741 int error = 0;
742 struct psref psref;
743
744 ifs = if_get(req->ifbr_ifsname, &psref);
745 if (ifs == NULL)
746 return ENOENT;
747
748 if (ifs->if_bridge == sc) {
749 error = EEXIST;
750 goto out;
751 }
752
753 if (ifs->if_bridge != NULL) {
754 error = EBUSY;
755 goto out;
756 }
757
758 if (ifs->_if_input != ether_input) {
759 error = EINVAL;
760 goto out;
761 }
762
763 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
764 if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
765 error = EINVAL;
766 goto out;
767 }
768
769 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
770
771 switch (ifs->if_type) {
772 case IFT_ETHER:
773 if (sc->sc_if.if_mtu != ifs->if_mtu) {
774 error = EINVAL;
775 goto out;
776 }
777 /* FALLTHROUGH */
778 case IFT_L2TP:
779 IFNET_LOCK(ifs);
780 error = ether_enable_vlan_mtu(ifs);
781 IFNET_UNLOCK(ifs);
782 if (error > 0)
783 goto out;
784 /*
785 * Place the interface into promiscuous mode.
786 */
787 error = ifpromisc(ifs, 1);
788 if (error)
789 goto out;
790 break;
791 default:
792 error = EINVAL;
793 goto out;
794 }
795
796 bif->bif_ifp = ifs;
797 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
798 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
799 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
800 PSLIST_ENTRY_INIT(bif, bif_next);
801 psref_target_init(&bif->bif_psref, bridge_psref_class);
802
803 BRIDGE_LOCK(sc);
804
805 ifs->if_bridge = sc;
806 ifs->if_bridgeif = bif;
807 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
808 ifs->_if_input = bridge_input;
809
810 BRIDGE_UNLOCK(sc);
811
812 if (sc->sc_if.if_flags & IFF_RUNNING)
813 bstp_initialization(sc);
814 else
815 bstp_stop(sc);
816
817 out:
818 if_put(ifs, &psref);
819 if (error) {
820 if (bif != NULL)
821 kmem_free(bif, sizeof(*bif));
822 }
823 return error;
824 }
825
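/*
 * bridge_ioctl_del:
 *
 *	BRDGDEL handler: remove a member interface from the bridge, take
 *	it out of promiscuous mode and purge its forwarding entries.
 */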
826 static int
827 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
828 {
829 struct ifbreq *req = arg;
830 const char *name = req->ifbr_ifsname;
831 struct bridge_iflist *bif;
832 struct ifnet *ifs;
833
834 BRIDGE_LOCK(sc);
835
836 	/*
837 	 * Don't use bridge_lookup_member; we must not take a psref to
838 	 * the member we are about to delete.
839 	 */
840 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
841 ifs = bif->bif_ifp;
842 if (strcmp(ifs->if_xname, name) == 0)
843 break;
844 }
845
846 if (bif == NULL) {
847 BRIDGE_UNLOCK(sc);
848 return ENOENT;
849 }
850
851 bridge_delete_member(sc, bif);
852
853 BRIDGE_UNLOCK(sc);
854
855 switch (ifs->if_type) {
856 case IFT_ETHER:
857 case IFT_L2TP:
858 		/*
859 		 * Take the interface out of promiscuous mode.
860 		 * Don't call ifpromisc() while holding a spin lock.
861 		 */
862 (void) ifpromisc(ifs, 0);
863 IFNET_LOCK(ifs);
864 (void) ether_disable_vlan_mtu(ifs);
865 IFNET_UNLOCK(ifs);
866 break;
867 default:
868 #ifdef DIAGNOSTIC
869 		panic("bridge_ioctl_del: impossible");
870 #endif
871 break;
872 }
873
874 bridge_rtdelete(sc, ifs);
875
876 if (sc->sc_if.if_flags & IFF_RUNNING)
877 bstp_initialization(sc);
878
879 return 0;
880 }
881
882 static int
883 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
884 {
885 struct ifbreq *req = arg;
886 struct bridge_iflist *bif;
887 struct psref psref;
888
889 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
890 if (bif == NULL)
891 return ENOENT;
892
893 req->ifbr_ifsflags = bif->bif_flags;
894 req->ifbr_state = bif->bif_state;
895 req->ifbr_priority = bif->bif_priority;
896 req->ifbr_path_cost = bif->bif_path_cost;
897 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
898
899 bridge_release_member(sc, bif, &psref);
900
901 return 0;
902 }
903
904 static int
905 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
906 {
907 struct ifbreq *req = arg;
908 struct bridge_iflist *bif;
909 struct psref psref;
910
911 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
912 if (bif == NULL)
913 return ENOENT;
914
915 if (req->ifbr_ifsflags & IFBIF_STP) {
916 switch (bif->bif_ifp->if_type) {
917 case IFT_ETHER:
918 case IFT_L2TP:
919 /* These can do spanning tree. */
920 break;
921
922 default:
923 /* Nothing else can. */
924 bridge_release_member(sc, bif, &psref);
925 return EINVAL;
926 }
927 }
928
929 bif->bif_flags = req->ifbr_ifsflags;
930
931 bridge_release_member(sc, bif, &psref);
932
933 if (sc->sc_if.if_flags & IFF_RUNNING)
934 bstp_initialization(sc);
935
936 return 0;
937 }
938
939 static int
940 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
941 {
942 struct ifbrparam *param = arg;
943
944 sc->sc_brtmax = param->ifbrp_csize;
945 bridge_rttrim(sc);
946
947 return 0;
948 }
949
950 static int
951 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
952 {
953 struct ifbrparam *param = arg;
954
955 param->ifbrp_csize = sc->sc_brtmax;
956
957 return 0;
958 }
959
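/*
 * bridge_ioctl_gifs:
 *
 *	Copy the list of member interfaces out to the caller.  The member
 *	count is sampled first; if the list grows before it is copied, the
 *	temporary buffer is released and the operation retried.
 */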
960 static int
961 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
962 {
963 struct ifbifconf *bifc = arg;
964 struct bridge_iflist *bif;
965 struct ifbreq *breqs;
966 int i, count, error = 0;
967
968 retry:
969 BRIDGE_LOCK(sc);
970 count = 0;
971 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
972 count++;
973 BRIDGE_UNLOCK(sc);
974
975 if (count == 0) {
976 bifc->ifbic_len = 0;
977 return 0;
978 }
979
980 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
981 		/* Tell the caller how large a buffer is needed */
982 bifc->ifbic_len = sizeof(*breqs) * count;
983 return 0;
984 }
985
986 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
987
988 BRIDGE_LOCK(sc);
989
990 i = 0;
991 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
992 i++;
993 if (i > count) {
994 /*
995 		 * The number of members has increased;
996 		 * we need a larger buffer.
997 */
998 BRIDGE_UNLOCK(sc);
999 kmem_free(breqs, sizeof(*breqs) * count);
1000 goto retry;
1001 }
1002
1003 i = 0;
1004 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1005 struct ifbreq *breq = &breqs[i++];
1006 memset(breq, 0, sizeof(*breq));
1007
1008 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1009 sizeof(breq->ifbr_ifsname));
1010 breq->ifbr_ifsflags = bif->bif_flags;
1011 breq->ifbr_state = bif->bif_state;
1012 breq->ifbr_priority = bif->bif_priority;
1013 breq->ifbr_path_cost = bif->bif_path_cost;
1014 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1015 }
1016
1017 	/* Don't call copyout() while holding the mutex */
1018 BRIDGE_UNLOCK(sc);
1019
1020 for (i = 0; i < count; i++) {
1021 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1022 if (error)
1023 break;
1024 }
1025 bifc->ifbic_len = sizeof(*breqs) * i;
1026
1027 kmem_free(breqs, sizeof(*breqs) * count);
1028
1029 return error;
1030 }
1031
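/*
 * bridge_ioctl_rts:
 *
 *	Copy the learned forwarding table out to the caller, one
 *	struct ifbareq per entry, as far as the supplied buffer allows.
 */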
1032 static int
1033 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1034 {
1035 struct ifbaconf *bac = arg;
1036 struct bridge_rtnode *brt;
1037 struct ifbareq bareq;
1038 int count = 0, error = 0, len;
1039
1040 if (bac->ifbac_len == 0)
1041 return 0;
1042
1043 BRIDGE_RT_LOCK(sc);
1044
1045 len = bac->ifbac_len;
1046 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1047 if (len < sizeof(bareq))
1048 goto out;
1049 memset(&bareq, 0, sizeof(bareq));
1050 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1051 sizeof(bareq.ifba_ifsname));
1052 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1053 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1054 bareq.ifba_expire = brt->brt_expire - time_uptime;
1055 } else
1056 bareq.ifba_expire = 0;
1057 bareq.ifba_flags = brt->brt_flags;
1058
1059 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1060 if (error)
1061 goto out;
1062 count++;
1063 len -= sizeof(bareq);
1064 }
1065 out:
1066 BRIDGE_RT_UNLOCK(sc);
1067
1068 bac->ifbac_len = sizeof(bareq) * count;
1069 return error;
1070 }
1071
1072 static int
1073 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1074 {
1075 struct ifbareq *req = arg;
1076 struct bridge_iflist *bif;
1077 int error;
1078 struct psref psref;
1079
1080 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1081 if (bif == NULL)
1082 return ENOENT;
1083
1084 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1085 req->ifba_flags);
1086
1087 bridge_release_member(sc, bif, &psref);
1088
1089 return error;
1090 }
1091
1092 static int
1093 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1094 {
1095 struct ifbrparam *param = arg;
1096
1097 sc->sc_brttimeout = param->ifbrp_ctime;
1098
1099 return 0;
1100 }
1101
1102 static int
1103 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1104 {
1105 struct ifbrparam *param = arg;
1106
1107 param->ifbrp_ctime = sc->sc_brttimeout;
1108
1109 return 0;
1110 }
1111
1112 static int
1113 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1114 {
1115 struct ifbareq *req = arg;
1116
1117 return (bridge_rtdaddr(sc, req->ifba_dst));
1118 }
1119
1120 static int
1121 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1122 {
1123 struct ifbreq *req = arg;
1124
1125 bridge_rtflush(sc, req->ifbr_ifsflags);
1126
1127 return 0;
1128 }
1129
1130 static int
1131 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1132 {
1133 struct ifbrparam *param = arg;
1134
1135 param->ifbrp_prio = sc->sc_bridge_priority;
1136
1137 return 0;
1138 }
1139
1140 static int
1141 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1142 {
1143 struct ifbrparam *param = arg;
1144
1145 sc->sc_bridge_priority = param->ifbrp_prio;
1146
1147 if (sc->sc_if.if_flags & IFF_RUNNING)
1148 bstp_initialization(sc);
1149
1150 return 0;
1151 }
1152
1153 static int
1154 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1155 {
1156 struct ifbrparam *param = arg;
1157
1158 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1159
1160 return 0;
1161 }
1162
1163 static int
1164 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1165 {
1166 struct ifbrparam *param = arg;
1167
1168 if (param->ifbrp_hellotime == 0)
1169 return EINVAL;
1170 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1171
1172 if (sc->sc_if.if_flags & IFF_RUNNING)
1173 bstp_initialization(sc);
1174
1175 return 0;
1176 }
1177
1178 static int
1179 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1180 {
1181 struct ifbrparam *param = arg;
1182
1183 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1184
1185 return 0;
1186 }
1187
1188 static int
1189 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1190 {
1191 struct ifbrparam *param = arg;
1192
1193 if (param->ifbrp_fwddelay == 0)
1194 return EINVAL;
1195 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1196
1197 if (sc->sc_if.if_flags & IFF_RUNNING)
1198 bstp_initialization(sc);
1199
1200 return 0;
1201 }
1202
1203 static int
1204 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1205 {
1206 struct ifbrparam *param = arg;
1207
1208 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1209
1210 return 0;
1211 }
1212
1213 static int
1214 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1215 {
1216 struct ifbrparam *param = arg;
1217
1218 if (param->ifbrp_maxage == 0)
1219 return EINVAL;
1220 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1221
1222 if (sc->sc_if.if_flags & IFF_RUNNING)
1223 bstp_initialization(sc);
1224
1225 return 0;
1226 }
1227
1228 static int
1229 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1230 {
1231 struct ifbreq *req = arg;
1232 struct bridge_iflist *bif;
1233 struct psref psref;
1234
1235 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1236 if (bif == NULL)
1237 return ENOENT;
1238
1239 bif->bif_priority = req->ifbr_priority;
1240
1241 if (sc->sc_if.if_flags & IFF_RUNNING)
1242 bstp_initialization(sc);
1243
1244 bridge_release_member(sc, bif, &psref);
1245
1246 return 0;
1247 }
1248
1249 #if defined(BRIDGE_IPF)
1250 static int
1251 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1252 {
1253 struct ifbrparam *param = arg;
1254
1255 param->ifbrp_filter = sc->sc_filter_flags;
1256
1257 return 0;
1258 }
1259
1260 static int
1261 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1262 {
1263 struct ifbrparam *param = arg;
1264 uint32_t nflags, oflags;
1265
1266 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1267 return EINVAL;
1268
1269 nflags = param->ifbrp_filter;
1270 oflags = sc->sc_filter_flags;
1271
1272 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1273 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1274 sc->sc_if.if_pfil);
1275 }
1276 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1277 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1278 sc->sc_if.if_pfil);
1279 }
1280
1281 sc->sc_filter_flags = nflags;
1282
1283 return 0;
1284 }
1285 #endif /* BRIDGE_IPF */
1286
1287 static int
1288 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1289 {
1290 struct ifbreq *req = arg;
1291 struct bridge_iflist *bif;
1292 struct psref psref;
1293
1294 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1295 if (bif == NULL)
1296 return ENOENT;
1297
1298 bif->bif_path_cost = req->ifbr_path_cost;
1299
1300 if (sc->sc_if.if_flags & IFF_RUNNING)
1301 bstp_initialization(sc);
1302
1303 bridge_release_member(sc, bif, &psref);
1304
1305 return 0;
1306 }
1307
1308 /*
1309 * bridge_ifdetach:
1310 *
1311 * Detach an interface from a bridge. Called when a member
1312 * interface is detaching.
1313 */
1314 void
1315 bridge_ifdetach(struct ifnet *ifp)
1316 {
1317 struct bridge_softc *sc = ifp->if_bridge;
1318 struct ifbreq breq;
1319
1320 /* ioctl_lock should prevent this from happening */
1321 KASSERT(sc != NULL);
1322
1323 memset(&breq, 0, sizeof(breq));
1324 strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1325
1326 (void) bridge_ioctl_del(sc, &breq);
1327 }
1328
1329 /*
1330 * bridge_init:
1331 *
1332 * Initialize a bridge interface.
1333 */
1334 static int
1335 bridge_init(struct ifnet *ifp)
1336 {
1337 struct bridge_softc *sc = ifp->if_softc;
1338
1339 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1340
1341 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1342 bridge_timer, sc);
1343 bstp_initialization(sc);
1344
1345 ifp->if_flags |= IFF_RUNNING;
1346 return 0;
1347 }
1348
1349 /*
1350 * bridge_stop:
1351 *
1352 * Stop the bridge interface.
1353 */
1354 static void
1355 bridge_stop(struct ifnet *ifp, int disable)
1356 {
1357 struct bridge_softc *sc = ifp->if_softc;
1358
1359 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1360 ifp->if_flags &= ~IFF_RUNNING;
1361
1362 callout_halt(&sc->sc_brcallout, NULL);
1363 workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1364 bstp_stop(sc);
1365 bridge_rtflush(sc, IFBF_FLUSHDYN);
1366 }
1367
1368 /*
1369 * bridge_enqueue:
1370 *
1371 * Enqueue a packet on a bridge member interface.
1372 */
1373 void
1374 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1375 int runfilt)
1376 {
1377 int len, error;
1378 short mflags;
1379
1380 /*
1381 * Clear any in-bound checksum flags for this packet.
1382 */
1383 m->m_pkthdr.csum_flags = 0;
1384
1385 if (runfilt) {
1386 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1387 dst_ifp, PFIL_OUT) != 0) {
1388 if (m != NULL)
1389 m_freem(m);
1390 return;
1391 }
1392 if (m == NULL)
1393 return;
1394 }
1395
1396 #ifdef ALTQ
1397 KERNEL_LOCK(1, NULL);
1398 /*
1399 * If ALTQ is enabled on the member interface, do
1400 * classification; the queueing discipline might
1401 * not require classification, but might require
1402 * the address family/header pointer in the pktattr.
1403 */
1404 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1405 /* XXX IFT_ETHER */
1406 altq_etherclassify(&dst_ifp->if_snd, m);
1407 }
1408 KERNEL_UNLOCK_ONE(NULL);
1409 #endif /* ALTQ */
1410
1411 len = m->m_pkthdr.len;
1412 mflags = m->m_flags;
1413
1414 error = if_transmit_lock(dst_ifp, m);
1415 if (error) {
1416 /* mbuf is already freed */
1417 sc->sc_if.if_oerrors++;
1418 return;
1419 }
1420
1421 sc->sc_if.if_opackets++;
1422 sc->sc_if.if_obytes += len;
1423 if (mflags & M_MCAST)
1424 sc->sc_if.if_omcasts++;
1425 }
1426
1427 /*
1428 * bridge_output:
1429 *
1430 * Send output from a bridge member interface. This
1431 * performs the bridging function for locally originated
1432 * packets.
1433 *
1434 * The mbuf has the Ethernet header already attached. We must
1435 * enqueue or free the mbuf before returning.
1436 */
1437 int
1438 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1439 const struct rtentry *rt)
1440 {
1441 struct ether_header *eh;
1442 struct ifnet *dst_if;
1443 struct bridge_softc *sc;
1444 int s;
1445
1446 /*
1447 	 * bridge_output() is called from ether_output(); furthermore, the
1448 	 * ifp argument doesn't point to bridge(4), so don't assert
1449 	 * IFEF_MPSAFE here.
1450 */
1451
1452 if (m->m_len < ETHER_HDR_LEN) {
1453 m = m_pullup(m, ETHER_HDR_LEN);
1454 if (m == NULL)
1455 return 0;
1456 }
1457
1458 eh = mtod(m, struct ether_header *);
1459 sc = ifp->if_bridge;
1460
1461 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1462 if (memcmp(etherbroadcastaddr,
1463 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1464 m->m_flags |= M_BCAST;
1465 else
1466 m->m_flags |= M_MCAST;
1467 }
1468
1469 /*
1470 * If bridge is down, but the original output interface is up,
1471 * go ahead and send out that interface. Otherwise, the packet
1472 * is dropped below.
1473 */
1474 if (__predict_false(sc == NULL) ||
1475 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1476 dst_if = ifp;
1477 goto sendunicast;
1478 }
1479
1480 /*
1481 * If the packet is a multicast, or we don't know a better way to
1482 * get there, send to all interfaces.
1483 */
1484 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1485 dst_if = NULL;
1486 else
1487 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1488 if (dst_if == NULL) {
1489 /* XXX Should call bridge_broadcast, but there are locking
1490 * issues which need resolving first. */
1491 struct bridge_iflist *bif;
1492 struct mbuf *mc;
1493 bool used = false;
1494
1495 BRIDGE_PSZ_RENTER(s);
1496 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1497 struct psref psref;
1498
1499 bridge_acquire_member(sc, bif, &psref);
1500 BRIDGE_PSZ_REXIT(s);
1501
1502 dst_if = bif->bif_ifp;
1503 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1504 goto next;
1505
1506 /*
1507 * If this is not the original output interface,
1508 * and the interface is participating in spanning
1509 * tree, make sure the port is in a state that
1510 * allows forwarding.
1511 */
1512 if (dst_if != ifp &&
1513 (bif->bif_flags & IFBIF_STP) != 0) {
1514 switch (bif->bif_state) {
1515 case BSTP_IFSTATE_BLOCKING:
1516 case BSTP_IFSTATE_LISTENING:
1517 case BSTP_IFSTATE_DISABLED:
1518 goto next;
1519 }
1520 }
1521
1522 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1523 bif_next) == NULL &&
1524 ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1525 dst_if == ifp))
1526 {
1527 used = true;
1528 mc = m;
1529 } else {
1530 mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
1531 if (mc == NULL) {
1532 sc->sc_if.if_oerrors++;
1533 goto next;
1534 }
1535 }
1536
1537 bridge_enqueue(sc, dst_if, mc, 0);
1538
1539 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1540 dst_if != ifp)
1541 {
1542 if (PSLIST_READER_NEXT(bif,
1543 struct bridge_iflist, bif_next) == NULL)
1544 {
1545 used = true;
1546 mc = m;
1547 } else {
1548 mc = m_copym(m, 0, M_COPYALL,
1549 M_DONTWAIT);
1550 if (mc == NULL) {
1551 sc->sc_if.if_oerrors++;
1552 goto next;
1553 }
1554 }
1555
1556 m_set_rcvif(mc, dst_if);
1557 mc->m_flags &= ~M_PROMISC;
1558
1559 #ifndef NET_MPSAFE
1560 s = splsoftnet();
1561 #endif
1562 ether_input(dst_if, mc);
1563 #ifndef NET_MPSAFE
1564 splx(s);
1565 #endif
1566 }
1567
1568 next:
1569 BRIDGE_PSZ_RENTER(s);
1570 bridge_release_member(sc, bif, &psref);
1571
1572 /* Guarantee we don't re-enter the loop as we already
1573 * decided we're at the end. */
1574 if (used)
1575 break;
1576 }
1577 BRIDGE_PSZ_REXIT(s);
1578
1579 if (!used)
1580 m_freem(m);
1581 return 0;
1582 }
1583
1584 sendunicast:
1585 /*
1586 * XXX Spanning tree consideration here?
1587 */
1588
1589 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1590 m_freem(m);
1591 return 0;
1592 }
1593
1594 bridge_enqueue(sc, dst_if, m, 0);
1595
1596 return 0;
1597 }
1598
1599 /*
1600 * bridge_start:
1601 *
1602 * Start output on a bridge.
1603 *
1604 * NOTE: This routine should never be called in this implementation.
1605 */
1606 static void
1607 bridge_start(struct ifnet *ifp)
1608 {
1609
1610 printf("%s: bridge_start() called\n", ifp->if_xname);
1611 }
1612
1613 /*
1614 * bridge_forward:
1615 *
1616 * The forwarding function of the bridge.
1617 */
1618 static void
1619 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1620 {
1621 struct bridge_iflist *bif;
1622 struct ifnet *src_if, *dst_if;
1623 struct ether_header *eh;
1624 struct psref psref;
1625 struct psref psref_src;
1626 DECLARE_LOCK_VARIABLE;
1627
1628 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1629 return;
1630
1631 src_if = m_get_rcvif_psref(m, &psref_src);
1632 if (src_if == NULL) {
1633 /* Interface is being destroyed? */
1634 m_freem(m);
1635 goto out;
1636 }
1637
1638 sc->sc_if.if_ipackets++;
1639 sc->sc_if.if_ibytes += m->m_pkthdr.len;
1640
1641 /*
1642 * Look up the bridge_iflist.
1643 */
1644 bif = bridge_lookup_member_if(sc, src_if, &psref);
1645 if (bif == NULL) {
1646 /* Interface is not a bridge member (anymore?) */
1647 m_freem(m);
1648 goto out;
1649 }
1650
1651 if (bif->bif_flags & IFBIF_STP) {
1652 switch (bif->bif_state) {
1653 case BSTP_IFSTATE_BLOCKING:
1654 case BSTP_IFSTATE_LISTENING:
1655 case BSTP_IFSTATE_DISABLED:
1656 m_freem(m);
1657 bridge_release_member(sc, bif, &psref);
1658 goto out;
1659 }
1660 }
1661
1662 eh = mtod(m, struct ether_header *);
1663
1664 /*
1665 * If the interface is learning, and the source
1666 * address is valid and not multicast, record
1667 * the address.
1668 */
1669 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1670 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1671 (eh->ether_shost[0] == 0 &&
1672 eh->ether_shost[1] == 0 &&
1673 eh->ether_shost[2] == 0 &&
1674 eh->ether_shost[3] == 0 &&
1675 eh->ether_shost[4] == 0 &&
1676 eh->ether_shost[5] == 0) == 0) {
1677 (void) bridge_rtupdate(sc, eh->ether_shost,
1678 src_if, 0, IFBAF_DYNAMIC);
1679 }
1680
1681 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1682 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1683 m_freem(m);
1684 bridge_release_member(sc, bif, &psref);
1685 goto out;
1686 }
1687
1688 bridge_release_member(sc, bif, &psref);
1689
1690 /*
1691 * At this point, the port either doesn't participate
1692 * in spanning tree or it is in the forwarding state.
1693 */
1694
1695 /*
1696 * If the packet is unicast, destined for someone on
1697 * "this" side of the bridge, drop it.
1698 */
1699 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1700 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1701 if (src_if == dst_if) {
1702 m_freem(m);
1703 goto out;
1704 }
1705 } else {
1706 /* ...forward it to all interfaces. */
1707 sc->sc_if.if_imcasts++;
1708 dst_if = NULL;
1709 }
1710
1711 if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1712 if (m != NULL)
1713 m_freem(m);
1714 goto out;
1715 }
1716 if (m == NULL)
1717 goto out;
1718
1719 if (dst_if == NULL) {
1720 bridge_broadcast(sc, src_if, m);
1721 goto out;
1722 }
1723
1724 m_put_rcvif_psref(src_if, &psref_src);
1725 src_if = NULL;
1726
1727 /*
1728 * At this point, we're dealing with a unicast frame
1729 * going to a different interface.
1730 */
1731 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1732 m_freem(m);
1733 goto out;
1734 }
1735
1736 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1737 if (bif == NULL) {
1738 /* Not a member of the bridge (anymore?) */
1739 m_freem(m);
1740 goto out;
1741 }
1742
1743 if (bif->bif_flags & IFBIF_STP) {
1744 switch (bif->bif_state) {
1745 case BSTP_IFSTATE_DISABLED:
1746 case BSTP_IFSTATE_BLOCKING:
1747 m_freem(m);
1748 bridge_release_member(sc, bif, &psref);
1749 goto out;
1750 }
1751 }
1752
1753 bridge_release_member(sc, bif, &psref);
1754
1755 ACQUIRE_GLOBAL_LOCKS();
1756 bridge_enqueue(sc, dst_if, m, 1);
1757 RELEASE_GLOBAL_LOCKS();
1758 out:
1759 if (src_if != NULL)
1760 m_put_rcvif_psref(src_if, &psref_src);
1761 return;
1762 }
1763
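/*
 * bstp_state_before_learning:
 *
 *	Return true if the member runs spanning tree and its port state
 *	(blocking, listening or disabled) does not yet allow learning or
 *	forwarding.
 */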
1764 static bool
1765 bstp_state_before_learning(struct bridge_iflist *bif)
1766 {
1767 if (bif->bif_flags & IFBIF_STP) {
1768 switch (bif->bif_state) {
1769 case BSTP_IFSTATE_BLOCKING:
1770 case BSTP_IFSTATE_LISTENING:
1771 case BSTP_IFSTATE_DISABLED:
1772 return true;
1773 }
1774 }
1775 return false;
1776 }
1777
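/*
 * bridge_ourether:
 *
 *	Return true if the frame's source (src != 0) or destination
 *	(src == 0) address matches the member interface's own Ethernet
 *	address, or one of its carp(4) addresses if carp is configured.
 */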
1778 static bool
1779 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1780 {
1781 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1782
1783 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1784 #if NCARP > 0
1785 || (bif->bif_ifp->if_carp &&
1786 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1787 #endif /* NCARP > 0 */
1788 )
1789 return true;
1790
1791 return false;
1792 }
1793
1794 /*
1795 * bridge_input:
1796 *
1797 * Receive input from a member interface. Queue the packet for
1798 * bridging if it is not for us.
1799 */
1800 static void
1801 bridge_input(struct ifnet *ifp, struct mbuf *m)
1802 {
1803 struct bridge_softc *sc = ifp->if_bridge;
1804 struct bridge_iflist *bif;
1805 struct ether_header *eh;
1806 struct psref psref;
1807 int bound;
1808 DECLARE_LOCK_VARIABLE;
1809
1810 KASSERT(!cpu_intr_p());
1811
1812 if (__predict_false(sc == NULL) ||
1813 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1814 ACQUIRE_GLOBAL_LOCKS();
1815 ether_input(ifp, m);
1816 RELEASE_GLOBAL_LOCKS();
1817 return;
1818 }
1819
1820 bound = curlwp_bind();
1821 bif = bridge_lookup_member_if(sc, ifp, &psref);
1822 if (bif == NULL) {
1823 curlwp_bindx(bound);
1824 ACQUIRE_GLOBAL_LOCKS();
1825 ether_input(ifp, m);
1826 RELEASE_GLOBAL_LOCKS();
1827 return;
1828 }
1829
1830 eh = mtod(m, struct ether_header *);
1831
1832 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1833 if (memcmp(etherbroadcastaddr,
1834 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1835 m->m_flags |= M_BCAST;
1836 else
1837 m->m_flags |= M_MCAST;
1838 }
1839
1840 /*
1841 * A 'fast' path for packets addressed to interfaces that are
1842 * part of this bridge.
1843 */
1844 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1845 !bstp_state_before_learning(bif)) {
1846 struct bridge_iflist *_bif;
1847 struct ifnet *_ifp = NULL;
1848 int s;
1849 struct psref _psref;
1850
1851 BRIDGE_PSZ_RENTER(s);
1852 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
1853 /* It is destined for us. */
1854 if (bridge_ourether(_bif, eh, 0)) {
1855 bridge_acquire_member(sc, _bif, &_psref);
1856 BRIDGE_PSZ_REXIT(s);
1857 if (_bif->bif_flags & IFBIF_LEARNING)
1858 (void) bridge_rtupdate(sc,
1859 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1860 m_set_rcvif(m, _bif->bif_ifp);
1861 _ifp = _bif->bif_ifp;
1862 bridge_release_member(sc, _bif, &_psref);
1863 goto out;
1864 }
1865
1866 /* We just received a packet that we sent out. */
1867 if (bridge_ourether(_bif, eh, 1))
1868 break;
1869 }
1870 BRIDGE_PSZ_REXIT(s);
1871 out:
1872
1873 if (_bif != NULL) {
1874 bridge_release_member(sc, bif, &psref);
1875 curlwp_bindx(bound);
1876 if (_ifp != NULL) {
1877 m->m_flags &= ~M_PROMISC;
1878 ACQUIRE_GLOBAL_LOCKS();
1879 ether_input(_ifp, m);
1880 RELEASE_GLOBAL_LOCKS();
1881 } else
1882 m_freem(m);
1883 return;
1884 }
1885 }
1886
1887 /* Tap off 802.1D packets; they do not get forwarded. */
1888 if (bif->bif_flags & IFBIF_STP &&
1889 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
1890 bstp_input(sc, bif, m);
1891 bridge_release_member(sc, bif, &psref);
1892 curlwp_bindx(bound);
1893 return;
1894 }
1895
1896 /*
1897 * A normal switch would discard the packet here, but that's not what
1898 * we've done historically. This also prevents some obnoxious behaviour.
1899 */
1900 if (bstp_state_before_learning(bif)) {
1901 bridge_release_member(sc, bif, &psref);
1902 curlwp_bindx(bound);
1903 ACQUIRE_GLOBAL_LOCKS();
1904 ether_input(ifp, m);
1905 RELEASE_GLOBAL_LOCKS();
1906 return;
1907 }
1908
1909 bridge_release_member(sc, bif, &psref);
1910
1911 bridge_forward(sc, m);
1912
1913 curlwp_bindx(bound);
1914 }
1915
1916 /*
1917 * bridge_broadcast:
1918 *
1919 * Send a frame to all interfaces that are members of
1920 * the bridge, except for the one on which the packet
1921 * arrived.
1922 */
1923 static void
1924 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
1925 struct mbuf *m)
1926 {
1927 struct bridge_iflist *bif;
1928 struct mbuf *mc;
1929 struct ifnet *dst_if;
1930 bool bmcast;
1931 int s;
1932 DECLARE_LOCK_VARIABLE;
1933
1934 bmcast = m->m_flags & (M_BCAST|M_MCAST);
1935
1936 BRIDGE_PSZ_RENTER(s);
1937 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1938 struct psref psref;
1939
1940 bridge_acquire_member(sc, bif, &psref);
1941 BRIDGE_PSZ_REXIT(s);
1942
1943 dst_if = bif->bif_ifp;
1944
1945 if (bif->bif_flags & IFBIF_STP) {
1946 switch (bif->bif_state) {
1947 case BSTP_IFSTATE_BLOCKING:
1948 case BSTP_IFSTATE_DISABLED:
1949 goto next;
1950 }
1951 }
1952
1953 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
1954 goto next;
1955
1956 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1957 goto next;
1958
1959 if (dst_if != src_if) {
1960 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
1961 if (mc == NULL) {
1962 sc->sc_if.if_oerrors++;
1963 goto next;
1964 }
1965 ACQUIRE_GLOBAL_LOCKS();
1966 bridge_enqueue(sc, dst_if, mc, 1);
1967 RELEASE_GLOBAL_LOCKS();
1968 }
1969
1970 if (bmcast) {
1971 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
1972 if (mc == NULL) {
1973 sc->sc_if.if_oerrors++;
1974 goto next;
1975 }
1976
1977 m_set_rcvif(mc, dst_if);
1978 mc->m_flags &= ~M_PROMISC;
1979
1980 ACQUIRE_GLOBAL_LOCKS();
1981 ether_input(dst_if, mc);
1982 RELEASE_GLOBAL_LOCKS();
1983 }
1984 next:
1985 BRIDGE_PSZ_RENTER(s);
1986 bridge_release_member(sc, bif, &psref);
1987 }
1988 BRIDGE_PSZ_REXIT(s);
1989
1990 m_freem(m);
1991 }
1992
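/*
 * bridge_rtalloc:
 *
 *	Allocate and insert a new forwarding table entry for dst, unless
 *	the table has already reached its configured maximum size.
 */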
1993 static int
1994 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
1995 struct bridge_rtnode **brtp)
1996 {
1997 struct bridge_rtnode *brt;
1998 int error;
1999
2000 if (sc->sc_brtcnt >= sc->sc_brtmax)
2001 return ENOSPC;
2002
2003 /*
2004 * Allocate a new bridge forwarding node, and
2005 * initialize the expiration time and Ethernet
2006 * address.
2007 */
2008 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2009 if (brt == NULL)
2010 return ENOMEM;
2011
2012 memset(brt, 0, sizeof(*brt));
2013 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2014 brt->brt_flags = IFBAF_DYNAMIC;
2015 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2016
2017 BRIDGE_RT_LOCK(sc);
2018 error = bridge_rtnode_insert(sc, brt);
2019 BRIDGE_RT_UNLOCK(sc);
2020
2021 if (error != 0) {
2022 pool_put(&bridge_rtnode_pool, brt);
2023 return error;
2024 }
2025
2026 *brtp = brt;
2027 return 0;
2028 }
2029
2030 /*
2031 * bridge_rtupdate:
2032 *
2033 * Add a bridge routing entry.
2034 */
2035 static int
2036 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2037 struct ifnet *dst_if, int setflags, uint8_t flags)
2038 {
2039 struct bridge_rtnode *brt;
2040 int s;
2041
2042 again:
2043 /*
2044 * A route for this destination might already exist. If so,
2045 * update it, otherwise create a new one.
2046 */
2047 BRIDGE_RT_RENTER(s);
2048 brt = bridge_rtnode_lookup(sc, dst);
2049
2050 if (brt != NULL) {
2051 brt->brt_ifp = dst_if;
2052 if (setflags) {
2053 brt->brt_flags = flags;
2054 if (flags & IFBAF_STATIC)
2055 brt->brt_expire = 0;
2056 else
2057 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2058 } else {
2059 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2060 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2061 }
2062 }
2063 BRIDGE_RT_REXIT(s);
2064
2065 if (brt == NULL) {
2066 int r;
2067
2068 r = bridge_rtalloc(sc, dst, &brt);
2069 if (r != 0)
2070 return r;
2071 goto again;
2072 }
2073
2074 return 0;
2075 }
2076
2077 /*
2078 * bridge_rtlookup:
2079 *
2080 * Lookup the destination interface for an address.
2081 */
2082 static struct ifnet *
2083 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2084 {
2085 struct bridge_rtnode *brt;
2086 struct ifnet *ifs = NULL;
2087 int s;
2088
2089 BRIDGE_RT_RENTER(s);
2090 brt = bridge_rtnode_lookup(sc, addr);
2091 if (brt != NULL)
2092 ifs = brt->brt_ifp;
2093 BRIDGE_RT_REXIT(s);
2094
2095 return ifs;
2096 }
2097
2098 typedef bool (*bridge_iterate_cb_t)
2099 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2100
2101 /*
2102 * bridge_rtlist_iterate_remove:
2103 *
2104  *	Iterate over sc->sc_rtlist and remove every rtnode for which the
2105  *	func callback returns true.  Removals follow the pserialize(9)
2106  *	pattern; to that end, all kmem_* operations are performed outside
2107  *	the mutexes.
2108 */
2109 static void
2110 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2111 {
2112 struct bridge_rtnode *brt, *nbrt;
2113 struct bridge_rtnode **brt_list;
2114 int i, count;
2115
2116 retry:
2117 count = sc->sc_brtcnt;
2118 if (count == 0)
2119 return;
2120 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2121
2122 BRIDGE_RT_LOCK(sc);
2123 if (__predict_false(sc->sc_brtcnt > count)) {
2124 /* The rtnodes increased, we need more memory */
2125 BRIDGE_RT_UNLOCK(sc);
2126 kmem_free(brt_list, sizeof(*brt_list) * count);
2127 goto retry;
2128 }
2129
2130 i = 0;
2131 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2132 bool need_break = false;
2133 if (func(sc, brt, &need_break, arg)) {
2134 bridge_rtnode_remove(sc, brt);
2135 brt_list[i++] = brt;
2136 }
2137 if (need_break)
2138 break;
2139 }
2140
2141 if (i > 0)
2142 BRIDGE_RT_PSZ_PERFORM(sc);
2143 BRIDGE_RT_UNLOCK(sc);
2144
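	/*
	 * The pserialize grace period above guarantees that no reader still
	 * holds a reference to the unlinked nodes, so free them now.
	 */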
2145 while (--i >= 0)
2146 bridge_rtnode_destroy(brt_list[i]);
2147
2148 kmem_free(brt_list, sizeof(*brt_list) * count);
2149 }
2150
2151 static bool
2152 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2153 bool *need_break, void *arg)
2154 {
2155 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
 2156 		/* Take the subsequent removal into account */
2157 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2158 *need_break = true;
2159 return true;
2160 } else
2161 return false;
2162 }
2163
2164 static void
2165 bridge_rttrim0(struct bridge_softc *sc)
2166 {
2167 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2168 }
2169
2170 /*
2171 * bridge_rttrim:
2172 *
 2173  *	Trim the routing table so that the number
 2174  *	of routing entries is less than or equal to
 2175  *	the configured maximum.
2176 */
2177 static void
2178 bridge_rttrim(struct bridge_softc *sc)
2179 {
2180
2181 /* Make sure we actually need to do this. */
2182 if (sc->sc_brtcnt <= sc->sc_brtmax)
2183 return;
2184
2185 /* Force an aging cycle; this might trim enough addresses. */
2186 bridge_rtage(sc);
2187 if (sc->sc_brtcnt <= sc->sc_brtmax)
2188 return;
2189
2190 bridge_rttrim0(sc);
2191
2192 return;
2193 }
2194
2195 /*
2196 * bridge_timer:
2197 *
2198 * Aging timer for the bridge.
2199 */
2200 static void
2201 bridge_timer(void *arg)
2202 {
2203 struct bridge_softc *sc = arg;
2204
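	/*
	 * The aging path allocates with KM_SLEEP and may block, which is not
	 * allowed in callout (soft interrupt) context, so hand the work off
	 * to a workqueue thread instead of aging here directly.
	 */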
2205 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2206 }
2207
2208 static void
2209 bridge_rtage_work(struct work *wk, void *arg)
2210 {
2211 struct bridge_softc *sc = arg;
2212
2213 KASSERT(wk == &sc->sc_rtage_wk);
2214
2215 bridge_rtage(sc);
2216
2217 if (sc->sc_if.if_flags & IFF_RUNNING)
2218 callout_reset(&sc->sc_brcallout,
2219 bridge_rtable_prune_period * hz, bridge_timer, sc);
2220 }
2221
2222 static bool
2223 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2224 bool *need_break, void *arg)
2225 {
2226 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2227 time_uptime >= brt->brt_expire)
2228 return true;
2229 else
2230 return false;
2231 }
2232
2233 /*
2234 * bridge_rtage:
2235 *
2236 * Perform an aging cycle.
2237 */
2238 static void
2239 bridge_rtage(struct bridge_softc *sc)
2240 {
2241 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2242 }
2243
2244
2245 static bool
2246 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2247 bool *need_break, void *arg)
2248 {
2249 int full = *(int*)arg;
2250
2251 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2252 return true;
2253 else
2254 return false;
2255 }
2256
2257 /*
2258 * bridge_rtflush:
2259 *
 2260  *	Remove all dynamic addresses (or, if full, all addresses) from the bridge.
2261 */
2262 static void
2263 bridge_rtflush(struct bridge_softc *sc, int full)
2264 {
2265 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2266 }
2267
2268 /*
2269 * bridge_rtdaddr:
2270 *
2271 * Remove an address from the table.
2272 */
2273 static int
2274 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2275 {
2276 struct bridge_rtnode *brt;
2277
2278 BRIDGE_RT_LOCK(sc);
2279 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2280 BRIDGE_RT_UNLOCK(sc);
2281 return ENOENT;
2282 }
2283 bridge_rtnode_remove(sc, brt);
2284 BRIDGE_RT_PSZ_PERFORM(sc);
2285 BRIDGE_RT_UNLOCK(sc);
2286
2287 bridge_rtnode_destroy(brt);
2288
2289 return 0;
2290 }
2291
2292 /*
2293 * bridge_rtdelete:
2294 *
 2295  *	Delete routes to a specific member interface.
2296 */
2297 static void
2298 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2299 {
2300 struct bridge_rtnode *brt;
2301
2302 BRIDGE_RT_LOCK(sc);
2303 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
2304 if (brt->brt_ifp == ifp)
2305 break;
2306 }
2307 if (brt == NULL) {
2308 BRIDGE_RT_UNLOCK(sc);
2309 return;
2310 }
2311 bridge_rtnode_remove(sc, brt);
2312 BRIDGE_RT_PSZ_PERFORM(sc);
2313 BRIDGE_RT_UNLOCK(sc);
2314
2315 bridge_rtnode_destroy(brt);
2316 }
2317
2318 /*
2319 * bridge_rtable_init:
2320 *
2321 * Initialize the route table for this bridge.
2322 */
2323 static void
2324 bridge_rtable_init(struct bridge_softc *sc)
2325 {
2326 int i;
2327
2328 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2329 KM_SLEEP);
2330
2331 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2332 LIST_INIT(&sc->sc_rthash[i]);
2333
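	/* Seed the hash with a random key so bucket placement is unpredictable. */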
2334 sc->sc_rthash_key = cprng_fast32();
2335
2336 LIST_INIT(&sc->sc_rtlist);
2337
2338 sc->sc_rtlist_psz = pserialize_create();
2339 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2340 }
2341
2342 /*
2343 * bridge_rtable_fini:
2344 *
2345 * Deconstruct the route table for this bridge.
2346 */
2347 static void
2348 bridge_rtable_fini(struct bridge_softc *sc)
2349 {
2350
2351 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2352 if (sc->sc_rtlist_lock)
2353 mutex_obj_free(sc->sc_rtlist_lock);
2354 if (sc->sc_rtlist_psz)
2355 pserialize_destroy(sc->sc_rtlist_psz);
2356 }
2357
2358 /*
2359 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2360 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2361 */
2362 #define mix(a, b, c) \
2363 do { \
2364 a -= b; a -= c; a ^= (c >> 13); \
2365 b -= c; b -= a; b ^= (a << 8); \
2366 c -= a; c -= b; c ^= (b >> 13); \
2367 a -= b; a -= c; a ^= (c >> 12); \
2368 b -= c; b -= a; b ^= (a << 16); \
2369 c -= a; c -= b; c ^= (b >> 5); \
2370 a -= b; a -= c; a ^= (c >> 3); \
2371 b -= c; b -= a; b ^= (a << 10); \
2372 c -= a; c -= b; c ^= (b >> 15); \
2373 } while (/*CONSTCOND*/0)
2374
2375 static inline uint32_t
2376 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2377 {
2378 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2379
2380 b += addr[5] << 8;
2381 b += addr[4];
2382 a += addr[3] << 24;
2383 a += addr[2] << 16;
2384 a += addr[1] << 8;
2385 a += addr[0];
2386
2387 mix(a, b, c);
2388
2389 return (c & BRIDGE_RTHASH_MASK);
2390 }
2391
2392 #undef mix
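
/*
 * Illustrative sketch (not compiled into the kernel): how a destination
 * MAC address is folded into a forwarding-table bucket.  The sample MAC
 * is arbitrary, "sc" stands for any bridge softc, and the final KASSERT
 * assumes BRIDGE_RTHASH_SIZE is a power of two so that BRIDGE_RTHASH_MASK
 * keeps only the low-order bits of the mixed value.
 */
#if 0
	const uint8_t mac[ETHER_ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t bucket;

	/* Key material comes from sc_rthash_key, seeded in bridge_rtable_init(). */
	bucket = bridge_rthash(sc, mac);
	KASSERT(bucket < BRIDGE_RTHASH_SIZE);

	/* An entry for this address, if any, lives on sc->sc_rthash[bucket]. */
#endif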
2393
2394 /*
2395 * bridge_rtnode_lookup:
2396 *
2397 * Look up a bridge route node for the specified destination.
2398 */
2399 static struct bridge_rtnode *
2400 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2401 {
2402 struct bridge_rtnode *brt;
2403 uint32_t hash;
2404 int dir;
2405
2406 hash = bridge_rthash(sc, addr);
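	/*
	 * Hash chains are kept sorted by bridge_rtnode_insert(), so the walk
	 * can stop as soon as it passes the point where the address would be.
	 */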
2407 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2408 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2409 if (dir == 0)
2410 return brt;
2411 if (dir > 0)
2412 return NULL;
2413 }
2414
2415 return NULL;
2416 }
2417
2418 /*
2419 * bridge_rtnode_insert:
2420 *
 2421  *	Insert the specified bridge node into the route table; if an
 2422  *	entry for the address already exists, EEXIST is returned.
2423 */
2424 static int
2425 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2426 {
2427 struct bridge_rtnode *lbrt;
2428 uint32_t hash;
2429 int dir;
2430
2431 KASSERT(BRIDGE_RT_LOCKED(sc));
2432
2433 hash = bridge_rthash(sc, brt->brt_addr);
2434
2435 lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2436 if (lbrt == NULL) {
2437 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2438 goto out;
2439 }
2440
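	/*
	 * Keep the chain sorted (largest address first) so that lookups can
	 * terminate early; walk until the insertion point is found.
	 */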
2441 do {
2442 dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2443 if (dir == 0)
2444 return EEXIST;
2445 if (dir > 0) {
2446 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2447 goto out;
2448 }
2449 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2450 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2451 goto out;
2452 }
2453 lbrt = LIST_NEXT(lbrt, brt_hash);
2454 } while (lbrt != NULL);
2455
2456 #ifdef DIAGNOSTIC
2457 panic("bridge_rtnode_insert: impossible");
2458 #endif
2459
2460 out:
2461 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2462 sc->sc_brtcnt++;
2463
2464 return 0;
2465 }
2466
2467 /*
2468 * bridge_rtnode_remove:
2469 *
2470 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2471 */
2472 static void
2473 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2474 {
2475
2476 KASSERT(BRIDGE_RT_LOCKED(sc));
2477
2478 LIST_REMOVE(brt, brt_hash);
2479 LIST_REMOVE(brt, brt_list);
2480 sc->sc_brtcnt--;
2481 }
2482
2483 /*
2484 * bridge_rtnode_destroy:
2485 *
2486 * Destroy a bridge rtnode.
2487 */
2488 static void
2489 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2490 {
2491
2492 pool_put(&bridge_rtnode_pool, brt);
2493 }
2494
2495 #if defined(BRIDGE_IPF)
2496 extern pfil_head_t *inet_pfil_hook; /* XXX */
2497 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2498
2499 /*
 2500  * Run bridge packets through IPF if they are of a type IPF can deal
 2501  * with; ARP and REVARP frames are passed without filtering, since IPF
 2502  * cannot inspect them.
2503 */
2504 static int
2505 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2506 {
2507 int snap, error;
2508 struct ether_header *eh1, eh2;
2509 struct llc llc1;
2510 uint16_t ether_type;
2511
2512 snap = 0;
 2513 	error = -1;	/* Fail by default; only an explicit 0 below lets the packet pass */
2514 eh1 = mtod(*mp, struct ether_header *);
2515 ether_type = ntohs(eh1->ether_type);
2516
 2517 	/*
 2518 	 * Check for SNAP/LLC (a type below ETHERMTU is an 802.3 length field).
 2519 	 */
2520 if (ether_type < ETHERMTU) {
2521 struct llc *llc2 = (struct llc *)(eh1 + 1);
2522
2523 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2524 llc2->llc_dsap == LLC_SNAP_LSAP &&
2525 llc2->llc_ssap == LLC_SNAP_LSAP &&
2526 llc2->llc_control == LLC_UI) {
2527 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2528 snap = 1;
2529 }
2530 }
2531
2532 /*
2533 * If we're trying to filter bridge traffic, don't look at anything
2534 * other than IP and ARP traffic. If the filter doesn't understand
2535 * IPv6, don't allow IPv6 through the bridge either. This is lame
2536 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2537 * but of course we don't have an AppleTalk filter to begin with.
 2538 	 * (Note that since IPF doesn't understand ARP, *all* ARP traffic
 2539 	 * is passed through unfiltered.)
2540 */
2541 switch (ether_type) {
2542 case ETHERTYPE_ARP:
2543 case ETHERTYPE_REVARP:
2544 return 0; /* Automatically pass */
2545 case ETHERTYPE_IP:
2546 # ifdef INET6
2547 case ETHERTYPE_IPV6:
2548 # endif /* INET6 */
2549 break;
2550 default:
2551 goto bad;
2552 }
2553
2554 /* Strip off the Ethernet header and keep a copy. */
2555 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2556 m_adj(*mp, ETHER_HDR_LEN);
2557
2558 /* Strip off snap header, if present */
2559 if (snap) {
2560 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2561 m_adj(*mp, sizeof(struct llc));
2562 }
2563
2564 /*
2565 * Check basic packet sanity and run IPF through pfil.
2566 */
2567 KASSERT(!cpu_intr_p());
2568 switch (ether_type)
2569 {
2570 case ETHERTYPE_IP :
2571 error = bridge_ip_checkbasic(mp);
2572 if (error == 0)
2573 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2574 break;
2575 # ifdef INET6
2576 case ETHERTYPE_IPV6 :
2577 error = bridge_ip6_checkbasic(mp);
2578 if (error == 0)
2579 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2580 break;
2581 # endif
2582 default :
2583 error = 0;
2584 break;
2585 }
2586
2587 if (*mp == NULL)
2588 return error;
2589 if (error != 0)
2590 goto bad;
2591
2592 error = -1;
2593
2594 /*
2595 * Finally, put everything back the way it was and return
2596 */
2597 if (snap) {
2598 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2599 if (*mp == NULL)
2600 return error;
2601 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2602 }
2603
2604 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2605 if (*mp == NULL)
2606 return error;
2607 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2608
2609 return 0;
2610
2611 bad:
2612 m_freem(*mp);
2613 *mp = NULL;
2614 return error;
2615 }
2616
2617 /*
 2618  * Perform basic checks on the header size, since
 2619  * IPF assumes ip_input has already validated it.
 2620  * Cut-and-pasted from ip_input.c.
2621 * Given how simple the IPv6 version is,
2622 * does the IPv4 version really need to be
2623 * this complicated?
2624 *
2625 * XXX Should we update ipstat here, or not?
2626 * XXX Right now we update ipstat but not
2627 * XXX csum_counter.
2628 */
2629 static int
2630 bridge_ip_checkbasic(struct mbuf **mp)
2631 {
2632 struct mbuf *m = *mp;
2633 struct ip *ip;
2634 int len, hlen;
2635
2636 if (*mp == NULL)
2637 return -1;
2638
2639 if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2640 if ((m = m_copyup(m, sizeof(struct ip),
2641 (max_linkhdr + 3) & ~3)) == NULL) {
2642 /* XXXJRT new stat, please */
2643 ip_statinc(IP_STAT_TOOSMALL);
2644 goto bad;
2645 }
2646 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
2647 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2648 ip_statinc(IP_STAT_TOOSMALL);
2649 goto bad;
2650 }
2651 }
2652 ip = mtod(m, struct ip *);
2653 if (ip == NULL) goto bad;
2654
2655 if (ip->ip_v != IPVERSION) {
2656 ip_statinc(IP_STAT_BADVERS);
2657 goto bad;
2658 }
2659 hlen = ip->ip_hl << 2;
2660 if (hlen < sizeof(struct ip)) { /* minimum header length */
2661 ip_statinc(IP_STAT_BADHLEN);
2662 goto bad;
2663 }
2664 if (hlen > m->m_len) {
 2665 		if ((m = m_pullup(m, hlen)) == NULL) {
2666 ip_statinc(IP_STAT_BADHLEN);
2667 goto bad;
2668 }
2669 ip = mtod(m, struct ip *);
2670 if (ip == NULL) goto bad;
2671 }
2672
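	/*
	 * Honour a hardware-verified IPv4 checksum only if the receiving
	 * interface advertises RX checksum offload: a good result skips the
	 * software check, a bad result drops the packet, and anything else
	 * falls through to in_cksum() below.
	 */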
2673 switch (m->m_pkthdr.csum_flags &
2674 ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2675 M_CSUM_IPv4_BAD)) {
2676 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2677 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2678 goto bad;
2679
2680 case M_CSUM_IPv4:
2681 /* Checksum was okay. */
2682 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2683 break;
2684
2685 default:
2686 /* Must compute it ourselves. */
2687 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2688 if (in_cksum(m, hlen) != 0)
2689 goto bad;
2690 break;
2691 }
2692
2693 /* Retrieve the packet length. */
2694 len = ntohs(ip->ip_len);
2695
2696 /*
2697 * Check for additional length bogosity
2698 */
2699 if (len < hlen) {
2700 ip_statinc(IP_STAT_BADLEN);
2701 goto bad;
2702 }
2703
2704 /*
2705 * Check that the amount of data in the buffers
 2706 	 * is at least as much as the IP header would have us expect.
 2707 	 * Drop the packet if it is shorter than that.
2708 */
2709 if (m->m_pkthdr.len < len) {
2710 ip_statinc(IP_STAT_TOOSHORT);
2711 goto bad;
2712 }
2713
2714 /* Checks out, proceed */
2715 *mp = m;
2716 return 0;
2717
2718 bad:
2719 *mp = m;
2720 return -1;
2721 }
2722
2723 # ifdef INET6
2724 /*
2725 * Same as above, but for IPv6.
2726 * Cut-and-pasted from ip6_input.c.
2727 * XXX Should we update ip6stat, or not?
2728 */
2729 static int
2730 bridge_ip6_checkbasic(struct mbuf **mp)
2731 {
2732 struct mbuf *m = *mp;
2733 struct ip6_hdr *ip6;
2734
2735 /*
2736 * If the IPv6 header is not aligned, slurp it up into a new
2737 * mbuf with space for link headers, in the event we forward
2738 * it. Otherwise, if it is aligned, make sure the entire base
2739 * IPv6 header is in the first mbuf of the chain.
2740 */
2741 if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2742 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2743 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2744 (max_linkhdr + 3) & ~3)) == NULL) {
2745 /* XXXJRT new stat, please */
2746 ip6_statinc(IP6_STAT_TOOSMALL);
2747 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2748 goto bad;
2749 }
2750 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2751 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2752 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2753 ip6_statinc(IP6_STAT_TOOSMALL);
2754 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2755 goto bad;
2756 }
2757 }
2758
2759 ip6 = mtod(m, struct ip6_hdr *);
2760
2761 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2762 ip6_statinc(IP6_STAT_BADVERS);
2763 in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2764 goto bad;
2765 }
2766
2767 /* Checks out, proceed */
2768 *mp = m;
2769 return 0;
2770
2771 bad:
2772 *mp = m;
2773 return -1;
2774 }
2775 # endif /* INET6 */
2776 #endif /* BRIDGE_IPF */
2777