1 /* $NetBSD: if_bridge.c,v 1.134.6.6 2018/01/16 13:01:10 martin Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79 * consider heterogeneous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.134.6.6 2018/01/16 13:01:10 martin Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #include "opt_net_mpsafe.h"
89 #endif /* _KERNEL_OPT */
90
91 #include <sys/param.h>
92 #include <sys/kernel.h>
93 #include <sys/mbuf.h>
94 #include <sys/queue.h>
95 #include <sys/socket.h>
96 #include <sys/socketvar.h> /* for softnet_lock */
97 #include <sys/sockio.h>
98 #include <sys/systm.h>
99 #include <sys/proc.h>
100 #include <sys/pool.h>
101 #include <sys/kauth.h>
102 #include <sys/cpu.h>
103 #include <sys/cprng.h>
104 #include <sys/mutex.h>
105 #include <sys/kmem.h>
106
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_dl.h>
110 #include <net/if_types.h>
111 #include <net/if_llc.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115
116 #if defined(BRIDGE_IPF)
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123
124 #include <netinet/ip6.h>
125 #include <netinet6/in6_var.h>
126 #include <netinet6/ip6_var.h>
127 #include <netinet6/ip6_private.h> /* XXX */
128 #endif /* BRIDGE_IPF */
129
130 /*
131 * Size of the route hash table. Must be a power of two.
132 */
133 #ifndef BRIDGE_RTHASH_SIZE
134 #define BRIDGE_RTHASH_SIZE 1024
135 #endif
136
137 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
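
/*
 * Since BRIDGE_RTHASH_SIZE is a power of two, BRIDGE_RTHASH_MASK lets
 * the hash code reduce a hash value to a bucket index with a single
 * AND instead of a modulo; e.g. with the default size of 1024 a hash
 * value h selects bucket (h & 0x3ff).
 */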
138
139 #include "carp.h"
140 #if NCARP > 0
141 #include <netinet/in.h>
142 #include <netinet/in_var.h>
143 #include <netinet/ip_carp.h>
144 #endif
145
146 #include "ioconf.h"
147
148 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
149 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
150 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
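
/*
 * The layout assertions above allow the BC_F_XLATEIN/BC_F_XLATEOUT
 * handling in bridge_ioctl() to treat struct ifbifconf and struct
 * ifbaconf interchangeably when shuttling the user buffer pointer
 * and length between the caller and the ioctl handlers.
 */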
151
152 /*
153 * Maximum number of addresses to cache.
154 */
155 #ifndef BRIDGE_RTABLE_MAX
156 #define BRIDGE_RTABLE_MAX 100
157 #endif
158
159 /*
160 * Spanning tree defaults.
161 */
162 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
163 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
164 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
165 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
166 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
167 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
168 #define BSTP_DEFAULT_PATH_COST 55
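
/*
 * The spanning tree times above are stored in units of 1/256 of a
 * second (the 802.1D timer encoding), so e.g. (20 * 256) is 20
 * seconds.  The BRDGGHT/BRDGSHT-style ioctl handlers below convert
 * to and from whole seconds by shifting by 8.
 */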
169
170 /*
171 * Timeout (in seconds) for entries learned dynamically.
172 */
173 #ifndef BRIDGE_RTABLE_TIMEOUT
174 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
175 #endif
176
177 /*
178 * Number of seconds between walks of the route list.
179 */
180 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
181 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
182 #endif
183
184 #define BRIDGE_RT_LOCK(_sc) if ((_sc)->sc_rtlist_lock) \
185 mutex_enter((_sc)->sc_rtlist_lock)
186 #define BRIDGE_RT_UNLOCK(_sc) if ((_sc)->sc_rtlist_lock) \
187 mutex_exit((_sc)->sc_rtlist_lock)
188 #define BRIDGE_RT_LOCKED(_sc) (!(_sc)->sc_rtlist_lock || \
189 mutex_owned((_sc)->sc_rtlist_lock))
190
191 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
192 if ((_sc)->sc_rtlist_psz != NULL) \
193 pserialize_perform((_sc)->sc_rtlist_psz);
194
195 #define BRIDGE_RT_RENTER(__s) do { __s = pserialize_read_enter(); } while (0)
196 #define BRIDGE_RT_REXIT(__s) do { pserialize_read_exit(__s); } while (0)
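
/*
 * Readers of the route list run inside a pserialize read section
 * (BRIDGE_RT_RENTER/BRIDGE_RT_REXIT).  Writers take sc_rtlist_lock,
 * unlink entries, call BRIDGE_RT_PSZ_PERFORM() to wait for readers
 * to drain, and only then free the memory; see
 * bridge_rtlist_iterate_remove() below.
 */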
197
198
199 #ifdef NET_MPSAFE
200 #define DECLARE_LOCK_VARIABLE
201 #define ACQUIRE_GLOBAL_LOCKS() do { } while (0)
202 #define RELEASE_GLOBAL_LOCKS() do { } while (0)
203 #else
204 #define DECLARE_LOCK_VARIABLE int __s
205 #define ACQUIRE_GLOBAL_LOCKS() do { \
206 KERNEL_LOCK(1, NULL); \
207 mutex_enter(softnet_lock); \
208 __s = splsoftnet(); \
209 } while (0)
210 #define RELEASE_GLOBAL_LOCKS() do { \
211 splx(__s); \
212 mutex_exit(softnet_lock); \
213 KERNEL_UNLOCK_ONE(NULL); \
214 } while (0)
215 #endif
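
/*
 * Typical use of the helpers above (see bridge_input() and
 * bridge_broadcast()): in the !NET_MPSAFE case the traditional
 * KERNEL_LOCK/softnet_lock/splsoftnet protection is still taken
 * around ether_input() and bridge_enqueue():
 *
 *	DECLARE_LOCK_VARIABLE;
 *	...
 *	ACQUIRE_GLOBAL_LOCKS();
 *	ether_input(dst_if, mc);
 *	RELEASE_GLOBAL_LOCKS();
 */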
216
217 struct psref_class *bridge_psref_class __read_mostly;
218
219 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
220
221 static struct pool bridge_rtnode_pool;
222
223 static int bridge_clone_create(struct if_clone *, int);
224 static int bridge_clone_destroy(struct ifnet *);
225
226 static int bridge_ioctl(struct ifnet *, u_long, void *);
227 static int bridge_init(struct ifnet *);
228 static void bridge_stop(struct ifnet *, int);
229 static void bridge_start(struct ifnet *);
230
231 static void bridge_input(struct ifnet *, struct mbuf *);
232 static void bridge_forward(struct bridge_softc *, struct mbuf *);
233
234 static void bridge_timer(void *);
235
236 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
237 struct mbuf *);
238
239 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
240 struct ifnet *, int, uint8_t);
241 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
242 static void bridge_rttrim(struct bridge_softc *);
243 static void bridge_rtage(struct bridge_softc *);
244 static void bridge_rtage_work(struct work *, void *);
245 static void bridge_rtflush(struct bridge_softc *, int);
246 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
247 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
248
249 static void bridge_rtable_init(struct bridge_softc *);
250 static void bridge_rtable_fini(struct bridge_softc *);
251
252 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
253 const uint8_t *);
254 static int bridge_rtnode_insert(struct bridge_softc *,
255 struct bridge_rtnode *);
256 static void bridge_rtnode_remove(struct bridge_softc *,
257 struct bridge_rtnode *);
258 static void bridge_rtnode_destroy(struct bridge_rtnode *);
259
260 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
261 const char *name,
262 struct psref *);
263 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
264 struct ifnet *ifp,
265 struct psref *);
266 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
267 struct psref *);
268 static void bridge_delete_member(struct bridge_softc *,
269 struct bridge_iflist *);
270 static void bridge_acquire_member(struct bridge_softc *sc,
271 struct bridge_iflist *,
272 struct psref *);
273
274 static int bridge_ioctl_add(struct bridge_softc *, void *);
275 static int bridge_ioctl_del(struct bridge_softc *, void *);
276 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
277 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
278 static int bridge_ioctl_scache(struct bridge_softc *, void *);
279 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
280 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
281 static int bridge_ioctl_rts(struct bridge_softc *, void *);
282 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
283 static int bridge_ioctl_sto(struct bridge_softc *, void *);
284 static int bridge_ioctl_gto(struct bridge_softc *, void *);
285 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
286 static int bridge_ioctl_flush(struct bridge_softc *, void *);
287 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
288 static int bridge_ioctl_spri(struct bridge_softc *, void *);
289 static int bridge_ioctl_ght(struct bridge_softc *, void *);
290 static int bridge_ioctl_sht(struct bridge_softc *, void *);
291 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
292 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
293 static int bridge_ioctl_gma(struct bridge_softc *, void *);
294 static int bridge_ioctl_sma(struct bridge_softc *, void *);
295 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
296 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
297 #if defined(BRIDGE_IPF)
298 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
299 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
300 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
301 static int bridge_ip_checkbasic(struct mbuf **mp);
302 # ifdef INET6
303 static int bridge_ip6_checkbasic(struct mbuf **mp);
304 # endif /* INET6 */
305 #endif /* BRIDGE_IPF */
306
307 struct bridge_control {
308 int (*bc_func)(struct bridge_softc *, void *);
309 int bc_argsize;
310 int bc_flags;
311 };
312
313 #define BC_F_COPYIN 0x01 /* copy arguments in */
314 #define BC_F_COPYOUT 0x02 /* copy arguments out */
315 #define BC_F_SUSER 0x04 /* do super-user check */
316 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
317 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
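
/*
 * BC_F_XLATEIN/BC_F_XLATEOUT entries do not copy the argument
 * structure in or out; bridge_ioctl() instead passes the user buffer
 * pointer and length through args.ifbifconf and the handler performs
 * its own piecewise copyout (see bridge_ioctl_gifs() and
 * bridge_ioctl_rts()).
 */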
318
319 static const struct bridge_control bridge_control_table[] = {
320 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
321 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
322
323 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
324 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
325
326 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
327 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
328
329 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
330 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
331
332 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
333
334 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
335 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
336
337 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
338
339 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
340
341 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
342 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343
344 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
345 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
346
347 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
348 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
349
350 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
351 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
352
353 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
354
355 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
356 #if defined(BRIDGE_IPF)
357 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
358 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
359 #endif /* BRIDGE_IPF */
360 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
361 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
362 };
363
364 static const int bridge_control_table_size = __arraycount(bridge_control_table);
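
/*
 * bridge_control_table is indexed directly by the ifd_cmd value of a
 * SIOCGDRVSPEC/SIOCSDRVSPEC request; bridge_ioctl() rejects any
 * ifd_cmd that falls outside bridge_control_table_size.
 */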
365
366 static struct if_clone bridge_cloner =
367 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
368
369 /*
370 * bridgeattach:
371 *
372 * Pseudo-device attach routine.
373 */
374 void
375 bridgeattach(int n)
376 {
377
378 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
379 0, 0, 0, "brtpl", NULL, IPL_NET);
380
381 bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
382
383 if_clone_attach(&bridge_cloner);
384 }
385
386 /*
387 * bridge_clone_create:
388 *
389 * Create a new bridge instance.
390 */
391 static int
392 bridge_clone_create(struct if_clone *ifc, int unit)
393 {
394 struct bridge_softc *sc;
395 struct ifnet *ifp;
396 int error;
397
398 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
399 ifp = &sc->sc_if;
400
401 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
402 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
403 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
404 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
405 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
406 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
407 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
408 sc->sc_filter_flags = 0;
409
410 /* Initialize our routing table. */
411 bridge_rtable_init(sc);
412
413 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
414 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
415 if (error)
416 panic("%s: workqueue_create %d\n", __func__, error);
417
418 callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
419 callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
420
421 mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
422 PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
423 sc->sc_iflist_psref.bip_psz = pserialize_create();
424
425 if_initname(ifp, ifc->ifc_name, unit);
426 ifp->if_softc = sc;
427 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
428 #ifdef NET_MPSAFE
429 ifp->if_extflags |= IFEF_MPSAFE;
430 #endif
431 ifp->if_mtu = ETHERMTU;
432 ifp->if_ioctl = bridge_ioctl;
433 ifp->if_output = bridge_output;
434 ifp->if_start = bridge_start;
435 ifp->if_stop = bridge_stop;
436 ifp->if_init = bridge_init;
437 ifp->if_type = IFT_BRIDGE;
438 ifp->if_addrlen = 0;
439 ifp->if_dlt = DLT_EN10MB;
440 ifp->if_hdrlen = ETHER_HDR_LEN;
441
442 error = if_initialize(ifp);
443 if (error != 0) {
444 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
445 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
446 callout_destroy(&sc->sc_brcallout);
447 callout_destroy(&sc->sc_bstpcallout);
448 workqueue_destroy(sc->sc_rtage_wq);
449 bridge_rtable_fini(sc);
450 kmem_free(sc, sizeof(*sc));
451
452 return error;
453 }
454 if_register(ifp);
455
456 if_alloc_sadl(ifp);
457
458 return 0;
459 }
460
461 /*
462 * bridge_clone_destroy:
463 *
464 * Destroy a bridge instance.
465 */
466 static int
467 bridge_clone_destroy(struct ifnet *ifp)
468 {
469 struct bridge_softc *sc = ifp->if_softc;
470 struct bridge_iflist *bif;
471 int s;
472
473 s = splsoftnet();
474
475 bridge_stop(ifp, 1);
476
477 BRIDGE_LOCK(sc);
478 for (;;) {
479 bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
480 bif_next);
481 if (bif == NULL)
482 break;
483 bridge_delete_member(sc, bif);
484 }
485 PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
486 BRIDGE_UNLOCK(sc);
487
488 splx(s);
489
490 if_detach(ifp);
491
492 /* Tear down the routing table. */
493 bridge_rtable_fini(sc);
494
495 pserialize_destroy(sc->sc_iflist_psref.bip_psz);
496 mutex_destroy(&sc->sc_iflist_psref.bip_lock);
497 callout_destroy(&sc->sc_brcallout);
498 callout_destroy(&sc->sc_bstpcallout);
499 workqueue_destroy(sc->sc_rtage_wq);
500 kmem_free(sc, sizeof(*sc));
501
502 return 0;
503 }
504
505 /*
506 * bridge_ioctl:
507 *
508 * Handle a control request from the operator.
509 */
510 static int
511 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
512 {
513 struct bridge_softc *sc = ifp->if_softc;
514 struct lwp *l = curlwp; /* XXX */
515 union {
516 struct ifbreq ifbreq;
517 struct ifbifconf ifbifconf;
518 struct ifbareq ifbareq;
519 struct ifbaconf ifbaconf;
520 struct ifbrparam ifbrparam;
521 } args;
522 struct ifdrv *ifd = (struct ifdrv *) data;
523 const struct bridge_control *bc = NULL; /* XXXGCC */
524 int s, error = 0;
525
526 /* Authorize command before calling splsoftnet(). */
527 switch (cmd) {
528 case SIOCGDRVSPEC:
529 case SIOCSDRVSPEC:
530 if (ifd->ifd_cmd >= bridge_control_table_size
531 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
532 error = EINVAL;
533 return error;
534 }
535
536 /* We only care about BC_F_SUSER at this point. */
537 if ((bc->bc_flags & BC_F_SUSER) == 0)
538 break;
539
540 error = kauth_authorize_network(l->l_cred,
541 KAUTH_NETWORK_INTERFACE_BRIDGE,
542 cmd == SIOCGDRVSPEC ?
543 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
544 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
545 ifd, NULL, NULL);
546 if (error)
547 return error;
548
549 break;
550 }
551
552 s = splsoftnet();
553
554 switch (cmd) {
555 case SIOCGDRVSPEC:
556 case SIOCSDRVSPEC:
557 KASSERT(bc != NULL);
558 if (cmd == SIOCGDRVSPEC &&
559 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
560 error = EINVAL;
561 break;
562 }
563 else if (cmd == SIOCSDRVSPEC &&
564 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
565 error = EINVAL;
566 break;
567 }
568
569 /* BC_F_SUSER is checked above, before splsoftnet(). */
570
571 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
572 && (ifd->ifd_len != bc->bc_argsize
573 || ifd->ifd_len > sizeof(args))) {
574 error = EINVAL;
575 break;
576 }
577
578 memset(&args, 0, sizeof(args));
579 if (bc->bc_flags & BC_F_COPYIN) {
580 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
581 if (error)
582 break;
583 } else if (bc->bc_flags & BC_F_XLATEIN) {
584 args.ifbifconf.ifbic_len = ifd->ifd_len;
585 args.ifbifconf.ifbic_buf = ifd->ifd_data;
586 }
587
588 error = (*bc->bc_func)(sc, &args);
589 if (error)
590 break;
591
592 if (bc->bc_flags & BC_F_COPYOUT) {
593 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
594 } else if (bc->bc_flags & BC_F_XLATEOUT) {
595 ifd->ifd_len = args.ifbifconf.ifbic_len;
596 ifd->ifd_data = args.ifbifconf.ifbic_buf;
597 }
598 break;
599
600 case SIOCSIFFLAGS:
601 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
602 break;
603 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
604 case IFF_RUNNING:
605 /*
606 * If interface is marked down and it is running,
607 * then stop and disable it.
608 */
609 (*ifp->if_stop)(ifp, 1);
610 break;
611 case IFF_UP:
612 /*
613 * If interface is marked up and it is stopped, then
614 * start it.
615 */
616 error = (*ifp->if_init)(ifp);
617 break;
618 default:
619 break;
620 }
621 break;
622
623 case SIOCSIFMTU:
624 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
625 error = 0;
626 break;
627
628 default:
629 error = ifioctl_common(ifp, cmd, data);
630 break;
631 }
632
633 splx(s);
634
635 return error;
636 }
637
638 /*
639 * bridge_lookup_member:
640 *
641 * Lookup a bridge member interface.
642 */
643 static struct bridge_iflist *
644 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
645 {
646 struct bridge_iflist *bif;
647 struct ifnet *ifp;
648 int s;
649
650 BRIDGE_PSZ_RENTER(s);
651
652 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
653 ifp = bif->bif_ifp;
654 if (strcmp(ifp->if_xname, name) == 0)
655 break;
656 }
657 if (bif != NULL)
658 bridge_acquire_member(sc, bif, psref);
659
660 BRIDGE_PSZ_REXIT(s);
661
662 return bif;
663 }
664
665 /*
666 * bridge_lookup_member_if:
667 *
668 * Lookup a bridge member interface by ifnet*.
669 */
670 static struct bridge_iflist *
671 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
672 struct psref *psref)
673 {
674 struct bridge_iflist *bif;
675 int s;
676
677 BRIDGE_PSZ_RENTER(s);
678
679 bif = member_ifp->if_bridgeif;
680 if (bif != NULL) {
681 psref_acquire(psref, &bif->bif_psref,
682 bridge_psref_class);
683 }
684
685 BRIDGE_PSZ_REXIT(s);
686
687 return bif;
688 }
689
690 static void
691 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
692 struct psref *psref)
693 {
694
695 psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
696 }
697
698 /*
699 * bridge_release_member:
700 *
701 * Release the specified member interface.
702 */
703 static void
704 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
705 struct psref *psref)
706 {
707
708 psref_release(psref, &bif->bif_psref, bridge_psref_class);
709 }
710
711 /*
712 * bridge_delete_member:
713 *
714 * Delete the specified member interface.
715 */
716 static void
717 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
718 {
719 struct ifnet *ifs = bif->bif_ifp;
720
721 KASSERT(BRIDGE_LOCKED(sc));
722
723 ifs->_if_input = ether_input;
724 ifs->if_bridge = NULL;
725 ifs->if_bridgeif = NULL;
726
727 PSLIST_WRITER_REMOVE(bif, bif_next);
728 BRIDGE_PSZ_PERFORM(sc);
729 BRIDGE_UNLOCK(sc);
730
731 psref_target_destroy(&bif->bif_psref, bridge_psref_class);
732
733 PSLIST_ENTRY_DESTROY(bif, bif_next);
734 kmem_free(bif, sizeof(*bif));
735
736 BRIDGE_LOCK(sc);
737 }
738
739 static int
740 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
741 {
742 struct ifbreq *req = arg;
743 struct bridge_iflist *bif = NULL;
744 struct ifnet *ifs;
745 int error = 0;
746 struct psref psref;
747
748 ifs = if_get(req->ifbr_ifsname, &psref);
749 if (ifs == NULL)
750 return ENOENT;
751
752 if (ifs->if_bridge == sc) {
753 error = EEXIST;
754 goto out;
755 }
756
757 if (ifs->if_bridge != NULL) {
758 error = EBUSY;
759 goto out;
760 }
761
762 if (ifs->_if_input != ether_input) {
763 error = EINVAL;
764 goto out;
765 }
766
767 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
768 if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
769 error = EINVAL;
770 goto out;
771 }
772
773 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
774
775 switch (ifs->if_type) {
776 case IFT_ETHER:
777 if (sc->sc_if.if_mtu != ifs->if_mtu) {
778 error = EINVAL;
779 goto out;
780 }
781 /* FALLTHROUGH */
782 case IFT_L2TP:
783 IFNET_LOCK(ifs);
784 error = ether_enable_vlan_mtu(ifs);
785 IFNET_UNLOCK(ifs);
786 if (error > 0)
787 goto out;
788 /*
789 * Place the interface into promiscuous mode.
790 */
791 error = ifpromisc(ifs, 1);
792 if (error)
793 goto out;
794 break;
795 default:
796 error = EINVAL;
797 goto out;
798 }
799
800 bif->bif_ifp = ifs;
801 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
802 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
803 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
804 PSLIST_ENTRY_INIT(bif, bif_next);
805 psref_target_init(&bif->bif_psref, bridge_psref_class);
806
807 BRIDGE_LOCK(sc);
808
809 ifs->if_bridge = sc;
810 ifs->if_bridgeif = bif;
811 PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
812 ifs->_if_input = bridge_input;
813
814 BRIDGE_UNLOCK(sc);
815
816 if (sc->sc_if.if_flags & IFF_RUNNING)
817 bstp_initialization(sc);
818 else
819 bstp_stop(sc);
820
821 out:
822 if_put(ifs, &psref);
823 if (error) {
824 if (bif != NULL)
825 kmem_free(bif, sizeof(*bif));
826 }
827 return error;
828 }
829
830 static int
831 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
832 {
833 struct ifbreq *req = arg;
834 const char *name = req->ifbr_ifsname;
835 struct bridge_iflist *bif;
836 struct ifnet *ifs;
837
838 BRIDGE_LOCK(sc);
839
840 /*
841 * Don't use bridge_lookup_member. We want to get a member
842 * without taking a psref on it.
843 */
844 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
845 ifs = bif->bif_ifp;
846 if (strcmp(ifs->if_xname, name) == 0)
847 break;
848 }
849
850 if (bif == NULL) {
851 BRIDGE_UNLOCK(sc);
852 return ENOENT;
853 }
854
855 bridge_delete_member(sc, bif);
856
857 BRIDGE_UNLOCK(sc);
858
859 switch (ifs->if_type) {
860 case IFT_ETHER:
861 case IFT_L2TP:
862 /*
863 * Take the interface out of promiscuous mode.
864 * Don't call it while holding a spin lock.
865 */
866 (void) ifpromisc(ifs, 0);
867 IFNET_LOCK(ifs);
868 (void) ether_disable_vlan_mtu(ifs);
869 IFNET_UNLOCK(ifs);
870 break;
871 default:
872 #ifdef DIAGNOSTIC
873 panic("%s: impossible", __func__);
874 #endif
875 break;
876 }
877
878 bridge_rtdelete(sc, ifs);
879
880 if (sc->sc_if.if_flags & IFF_RUNNING)
881 bstp_initialization(sc);
882
883 return 0;
884 }
885
886 static int
887 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
888 {
889 struct ifbreq *req = arg;
890 struct bridge_iflist *bif;
891 struct psref psref;
892
893 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
894 if (bif == NULL)
895 return ENOENT;
896
897 req->ifbr_ifsflags = bif->bif_flags;
898 req->ifbr_state = bif->bif_state;
899 req->ifbr_priority = bif->bif_priority;
900 req->ifbr_path_cost = bif->bif_path_cost;
901 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
902
903 bridge_release_member(sc, bif, &psref);
904
905 return 0;
906 }
907
908 static int
909 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
910 {
911 struct ifbreq *req = arg;
912 struct bridge_iflist *bif;
913 struct psref psref;
914
915 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
916 if (bif == NULL)
917 return ENOENT;
918
919 if (req->ifbr_ifsflags & IFBIF_STP) {
920 switch (bif->bif_ifp->if_type) {
921 case IFT_ETHER:
922 case IFT_L2TP:
923 /* These can do spanning tree. */
924 break;
925
926 default:
927 /* Nothing else can. */
928 bridge_release_member(sc, bif, &psref);
929 return EINVAL;
930 }
931 }
932
933 bif->bif_flags = req->ifbr_ifsflags;
934
935 bridge_release_member(sc, bif, &psref);
936
937 if (sc->sc_if.if_flags & IFF_RUNNING)
938 bstp_initialization(sc);
939
940 return 0;
941 }
942
943 static int
944 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
945 {
946 struct ifbrparam *param = arg;
947
948 sc->sc_brtmax = param->ifbrp_csize;
949 bridge_rttrim(sc);
950
951 return 0;
952 }
953
954 static int
955 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
956 {
957 struct ifbrparam *param = arg;
958
959 param->ifbrp_csize = sc->sc_brtmax;
960
961 return 0;
962 }
963
964 static int
965 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
966 {
967 struct ifbifconf *bifc = arg;
968 struct bridge_iflist *bif;
969 struct ifbreq *breqs;
970 int i, count, error = 0;
971
972 retry:
973 BRIDGE_LOCK(sc);
974 count = 0;
975 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
976 count++;
977 BRIDGE_UNLOCK(sc);
978
979 if (count == 0) {
980 bifc->ifbic_len = 0;
981 return 0;
982 }
983
984 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
985 /* Tell the caller that a larger buffer is needed */
986 bifc->ifbic_len = sizeof(*breqs) * count;
987 return 0;
988 }
989
990 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
991
992 BRIDGE_LOCK(sc);
993
994 i = 0;
995 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
996 i++;
997 if (i > count) {
998 /*
999 * The number of members has been increased.
1000 * We need more memory!
1001 */
1002 BRIDGE_UNLOCK(sc);
1003 kmem_free(breqs, sizeof(*breqs) * count);
1004 goto retry;
1005 }
1006
1007 i = 0;
1008 BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1009 struct ifbreq *breq = &breqs[i++];
1010 memset(breq, 0, sizeof(*breq));
1011
1012 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1013 sizeof(breq->ifbr_ifsname));
1014 breq->ifbr_ifsflags = bif->bif_flags;
1015 breq->ifbr_state = bif->bif_state;
1016 breq->ifbr_priority = bif->bif_priority;
1017 breq->ifbr_path_cost = bif->bif_path_cost;
1018 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1019 }
1020
1021 /* Don't call copyout while holding the mutex */
1022 BRIDGE_UNLOCK(sc);
1023
1024 for (i = 0; i < count; i++) {
1025 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1026 if (error)
1027 break;
1028 }
1029 bifc->ifbic_len = sizeof(*breqs) * i;
1030
1031 kmem_free(breqs, sizeof(*breqs) * count);
1032
1033 return error;
1034 }
1035
1036 static int
1037 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1038 {
1039 struct ifbaconf *bac = arg;
1040 struct bridge_rtnode *brt;
1041 struct ifbareq bareq;
1042 int count = 0, error = 0, len;
1043
1044 if (bac->ifbac_len == 0)
1045 return 0;
1046
1047 BRIDGE_RT_LOCK(sc);
1048
1049 len = bac->ifbac_len;
1050 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1051 if (len < sizeof(bareq))
1052 goto out;
1053 memset(&bareq, 0, sizeof(bareq));
1054 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1055 sizeof(bareq.ifba_ifsname));
1056 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1057 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1058 bareq.ifba_expire = brt->brt_expire - time_uptime;
1059 } else
1060 bareq.ifba_expire = 0;
1061 bareq.ifba_flags = brt->brt_flags;
1062
1063 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1064 if (error)
1065 goto out;
1066 count++;
1067 len -= sizeof(bareq);
1068 }
1069 out:
1070 BRIDGE_RT_UNLOCK(sc);
1071
1072 bac->ifbac_len = sizeof(bareq) * count;
1073 return error;
1074 }
1075
1076 static int
1077 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1078 {
1079 struct ifbareq *req = arg;
1080 struct bridge_iflist *bif;
1081 int error;
1082 struct psref psref;
1083
1084 bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1085 if (bif == NULL)
1086 return ENOENT;
1087
1088 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1089 req->ifba_flags);
1090
1091 bridge_release_member(sc, bif, &psref);
1092
1093 return error;
1094 }
1095
1096 static int
1097 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1098 {
1099 struct ifbrparam *param = arg;
1100
1101 sc->sc_brttimeout = param->ifbrp_ctime;
1102
1103 return 0;
1104 }
1105
1106 static int
1107 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1108 {
1109 struct ifbrparam *param = arg;
1110
1111 param->ifbrp_ctime = sc->sc_brttimeout;
1112
1113 return 0;
1114 }
1115
1116 static int
1117 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1118 {
1119 struct ifbareq *req = arg;
1120
1121 return (bridge_rtdaddr(sc, req->ifba_dst));
1122 }
1123
1124 static int
1125 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1126 {
1127 struct ifbreq *req = arg;
1128
1129 bridge_rtflush(sc, req->ifbr_ifsflags);
1130
1131 return 0;
1132 }
1133
1134 static int
1135 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1136 {
1137 struct ifbrparam *param = arg;
1138
1139 param->ifbrp_prio = sc->sc_bridge_priority;
1140
1141 return 0;
1142 }
1143
1144 static int
1145 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1146 {
1147 struct ifbrparam *param = arg;
1148
1149 sc->sc_bridge_priority = param->ifbrp_prio;
1150
1151 if (sc->sc_if.if_flags & IFF_RUNNING)
1152 bstp_initialization(sc);
1153
1154 return 0;
1155 }
1156
1157 static int
1158 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1159 {
1160 struct ifbrparam *param = arg;
1161
1162 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1163
1164 return 0;
1165 }
1166
1167 static int
1168 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1169 {
1170 struct ifbrparam *param = arg;
1171
1172 if (param->ifbrp_hellotime == 0)
1173 return EINVAL;
1174 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1175
1176 if (sc->sc_if.if_flags & IFF_RUNNING)
1177 bstp_initialization(sc);
1178
1179 return 0;
1180 }
1181
1182 static int
1183 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1184 {
1185 struct ifbrparam *param = arg;
1186
1187 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1188
1189 return 0;
1190 }
1191
1192 static int
1193 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1194 {
1195 struct ifbrparam *param = arg;
1196
1197 if (param->ifbrp_fwddelay == 0)
1198 return EINVAL;
1199 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1200
1201 if (sc->sc_if.if_flags & IFF_RUNNING)
1202 bstp_initialization(sc);
1203
1204 return 0;
1205 }
1206
1207 static int
1208 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1209 {
1210 struct ifbrparam *param = arg;
1211
1212 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1213
1214 return 0;
1215 }
1216
1217 static int
1218 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1219 {
1220 struct ifbrparam *param = arg;
1221
1222 if (param->ifbrp_maxage == 0)
1223 return EINVAL;
1224 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1225
1226 if (sc->sc_if.if_flags & IFF_RUNNING)
1227 bstp_initialization(sc);
1228
1229 return 0;
1230 }
1231
1232 static int
1233 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1234 {
1235 struct ifbreq *req = arg;
1236 struct bridge_iflist *bif;
1237 struct psref psref;
1238
1239 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1240 if (bif == NULL)
1241 return ENOENT;
1242
1243 bif->bif_priority = req->ifbr_priority;
1244
1245 if (sc->sc_if.if_flags & IFF_RUNNING)
1246 bstp_initialization(sc);
1247
1248 bridge_release_member(sc, bif, &psref);
1249
1250 return 0;
1251 }
1252
1253 #if defined(BRIDGE_IPF)
1254 static int
1255 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1256 {
1257 struct ifbrparam *param = arg;
1258
1259 param->ifbrp_filter = sc->sc_filter_flags;
1260
1261 return 0;
1262 }
1263
1264 static int
1265 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1266 {
1267 struct ifbrparam *param = arg;
1268 uint32_t nflags, oflags;
1269
1270 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1271 return EINVAL;
1272
1273 nflags = param->ifbrp_filter;
1274 oflags = sc->sc_filter_flags;
1275
1276 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1277 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1278 sc->sc_if.if_pfil);
1279 }
1280 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1281 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1282 sc->sc_if.if_pfil);
1283 }
1284
1285 sc->sc_filter_flags = nflags;
1286
1287 return 0;
1288 }
1289 #endif /* BRIDGE_IPF */
1290
1291 static int
1292 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1293 {
1294 struct ifbreq *req = arg;
1295 struct bridge_iflist *bif;
1296 struct psref psref;
1297
1298 bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1299 if (bif == NULL)
1300 return ENOENT;
1301
1302 bif->bif_path_cost = req->ifbr_path_cost;
1303
1304 if (sc->sc_if.if_flags & IFF_RUNNING)
1305 bstp_initialization(sc);
1306
1307 bridge_release_member(sc, bif, &psref);
1308
1309 return 0;
1310 }
1311
1312 /*
1313 * bridge_ifdetach:
1314 *
1315 * Detach an interface from a bridge. Called when a member
1316 * interface is detaching.
1317 */
1318 void
1319 bridge_ifdetach(struct ifnet *ifp)
1320 {
1321 struct bridge_softc *sc = ifp->if_bridge;
1322 struct ifbreq breq;
1323
1324 /* ioctl_lock should prevent this from happening */
1325 KASSERT(sc != NULL);
1326
1327 memset(&breq, 0, sizeof(breq));
1328 strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1329
1330 (void) bridge_ioctl_del(sc, &breq);
1331 }
1332
1333 /*
1334 * bridge_init:
1335 *
1336 * Initialize a bridge interface.
1337 */
1338 static int
1339 bridge_init(struct ifnet *ifp)
1340 {
1341 struct bridge_softc *sc = ifp->if_softc;
1342
1343 KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1344
1345 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1346 bridge_timer, sc);
1347 bstp_initialization(sc);
1348
1349 ifp->if_flags |= IFF_RUNNING;
1350 return 0;
1351 }
1352
1353 /*
1354 * bridge_stop:
1355 *
1356 * Stop the bridge interface.
1357 */
1358 static void
1359 bridge_stop(struct ifnet *ifp, int disable)
1360 {
1361 struct bridge_softc *sc = ifp->if_softc;
1362
1363 KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1364 ifp->if_flags &= ~IFF_RUNNING;
1365
1366 callout_halt(&sc->sc_brcallout, NULL);
1367 workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1368 bstp_stop(sc);
1369 bridge_rtflush(sc, IFBF_FLUSHDYN);
1370 }
1371
1372 /*
1373 * bridge_enqueue:
1374 *
1375 * Enqueue a packet on a bridge member interface.
1376 */
1377 void
1378 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1379 int runfilt)
1380 {
1381 int len, error;
1382 short mflags;
1383
1384 /*
1385 * Clear any in-bound checksum flags for this packet.
1386 */
1387 m->m_pkthdr.csum_flags = 0;
1388
1389 if (runfilt) {
1390 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1391 dst_ifp, PFIL_OUT) != 0) {
1392 if (m != NULL)
1393 m_freem(m);
1394 return;
1395 }
1396 if (m == NULL)
1397 return;
1398 }
1399
1400 #ifdef ALTQ
1401 KERNEL_LOCK(1, NULL);
1402 /*
1403 * If ALTQ is enabled on the member interface, do
1404 * classification; the queueing discipline might
1405 * not require classification, but might require
1406 * the address family/header pointer in the pktattr.
1407 */
1408 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1409 /* XXX IFT_ETHER */
1410 altq_etherclassify(&dst_ifp->if_snd, m);
1411 }
1412 KERNEL_UNLOCK_ONE(NULL);
1413 #endif /* ALTQ */
1414
1415 len = m->m_pkthdr.len;
1416 mflags = m->m_flags;
1417
1418 error = if_transmit_lock(dst_ifp, m);
1419 if (error) {
1420 /* mbuf is already freed */
1421 sc->sc_if.if_oerrors++;
1422 return;
1423 }
1424
1425 sc->sc_if.if_opackets++;
1426 sc->sc_if.if_obytes += len;
1427 if (mflags & M_MCAST)
1428 sc->sc_if.if_omcasts++;
1429 }
1430
1431 /*
1432 * bridge_output:
1433 *
1434 * Send output from a bridge member interface. This
1435 * performs the bridging function for locally originated
1436 * packets.
1437 *
1438 * The mbuf has the Ethernet header already attached. We must
1439 * enqueue or free the mbuf before returning.
1440 */
1441 int
1442 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1443 const struct rtentry *rt)
1444 {
1445 struct ether_header *eh;
1446 struct ifnet *dst_if;
1447 struct bridge_softc *sc;
1448 int s;
1449
1450 /*
1451 * bridge_output() is called from ether_output(); furthermore, the
1452 * ifp argument doesn't point to bridge(4), so don't assert
1453 * IFEF_MPSAFE here.
1454 */
1455
1456 if (m->m_len < ETHER_HDR_LEN) {
1457 m = m_pullup(m, ETHER_HDR_LEN);
1458 if (m == NULL)
1459 return 0;
1460 }
1461
1462 eh = mtod(m, struct ether_header *);
1463 sc = ifp->if_bridge;
1464
1465 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1466 if (memcmp(etherbroadcastaddr,
1467 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1468 m->m_flags |= M_BCAST;
1469 else
1470 m->m_flags |= M_MCAST;
1471 }
1472
1473 /*
1474 * If bridge is down, but the original output interface is up,
1475 * go ahead and send out that interface. Otherwise, the packet
1476 * is dropped below.
1477 */
1478 if (__predict_false(sc == NULL) ||
1479 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1480 dst_if = ifp;
1481 goto sendunicast;
1482 }
1483
1484 /*
1485 * If the packet is a multicast, or we don't know a better way to
1486 * get there, send to all interfaces.
1487 */
1488 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1489 dst_if = NULL;
1490 else
1491 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1492 if (dst_if == NULL) {
1493 /* XXX Should call bridge_broadcast, but there are locking
1494 * issues which need resolving first. */
1495 struct bridge_iflist *bif;
1496 struct mbuf *mc;
1497 bool used = false;
1498
1499 BRIDGE_PSZ_RENTER(s);
1500 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1501 struct psref psref;
1502
1503 bridge_acquire_member(sc, bif, &psref);
1504 BRIDGE_PSZ_REXIT(s);
1505
1506 dst_if = bif->bif_ifp;
1507 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1508 goto next;
1509
1510 /*
1511 * If this is not the original output interface,
1512 * and the interface is participating in spanning
1513 * tree, make sure the port is in a state that
1514 * allows forwarding.
1515 */
1516 if (dst_if != ifp &&
1517 (bif->bif_flags & IFBIF_STP) != 0) {
1518 switch (bif->bif_state) {
1519 case BSTP_IFSTATE_BLOCKING:
1520 case BSTP_IFSTATE_LISTENING:
1521 case BSTP_IFSTATE_DISABLED:
1522 goto next;
1523 }
1524 }
1525
1526 if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1527 bif_next) == NULL &&
1528 ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1529 dst_if == ifp))
1530 {
1531 used = true;
1532 mc = m;
1533 } else {
1534 mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
1535 if (mc == NULL) {
1536 sc->sc_if.if_oerrors++;
1537 goto next;
1538 }
1539 }
1540
1541 bridge_enqueue(sc, dst_if, mc, 0);
1542
1543 if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1544 dst_if != ifp)
1545 {
1546 if (PSLIST_READER_NEXT(bif,
1547 struct bridge_iflist, bif_next) == NULL)
1548 {
1549 used = true;
1550 mc = m;
1551 } else {
1552 mc = m_copym(m, 0, M_COPYALL,
1553 M_DONTWAIT);
1554 if (mc == NULL) {
1555 sc->sc_if.if_oerrors++;
1556 goto next;
1557 }
1558 }
1559
1560 m_set_rcvif(mc, dst_if);
1561 mc->m_flags &= ~M_PROMISC;
1562
1563 #ifndef NET_MPSAFE
1564 s = splsoftnet();
1565 #endif
1566 ether_input(dst_if, mc);
1567 #ifndef NET_MPSAFE
1568 splx(s);
1569 #endif
1570 }
1571
1572 next:
1573 BRIDGE_PSZ_RENTER(s);
1574 bridge_release_member(sc, bif, &psref);
1575
1576 /* Guarantee we don't re-enter the loop as we already
1577 * decided we're at the end. */
1578 if (used)
1579 break;
1580 }
1581 BRIDGE_PSZ_REXIT(s);
1582
1583 if (!used)
1584 m_freem(m);
1585 return 0;
1586 }
1587
1588 sendunicast:
1589 /*
1590 * XXX Spanning tree consideration here?
1591 */
1592
1593 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1594 m_freem(m);
1595 return 0;
1596 }
1597
1598 bridge_enqueue(sc, dst_if, m, 0);
1599
1600 return 0;
1601 }
1602
1603 /*
1604 * bridge_start:
1605 *
1606 * Start output on a bridge.
1607 *
1608 * NOTE: This routine should never be called in this implementation.
1609 */
1610 static void
1611 bridge_start(struct ifnet *ifp)
1612 {
1613
1614 printf("%s: bridge_start() called\n", ifp->if_xname);
1615 }
1616
1617 /*
1618 * bridge_forward:
1619 *
1620 * The forwarding function of the bridge.
1621 */
1622 static void
1623 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1624 {
1625 struct bridge_iflist *bif;
1626 struct ifnet *src_if, *dst_if;
1627 struct ether_header *eh;
1628 struct psref psref;
1629 struct psref psref_src;
1630 DECLARE_LOCK_VARIABLE;
1631
1632 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1633 return;
1634
1635 src_if = m_get_rcvif_psref(m, &psref_src);
1636 if (src_if == NULL) {
1637 /* Interface is being destroyed? */
1638 m_freem(m);
1639 goto out;
1640 }
1641
1642 sc->sc_if.if_ipackets++;
1643 sc->sc_if.if_ibytes += m->m_pkthdr.len;
1644
1645 /*
1646 * Look up the bridge_iflist.
1647 */
1648 bif = bridge_lookup_member_if(sc, src_if, &psref);
1649 if (bif == NULL) {
1650 /* Interface is not a bridge member (anymore?) */
1651 m_freem(m);
1652 goto out;
1653 }
1654
1655 if (bif->bif_flags & IFBIF_STP) {
1656 switch (bif->bif_state) {
1657 case BSTP_IFSTATE_BLOCKING:
1658 case BSTP_IFSTATE_LISTENING:
1659 case BSTP_IFSTATE_DISABLED:
1660 m_freem(m);
1661 bridge_release_member(sc, bif, &psref);
1662 goto out;
1663 }
1664 }
1665
1666 eh = mtod(m, struct ether_header *);
1667
1668 /*
1669 * If the interface is learning, and the source
1670 * address is valid and not multicast, record
1671 * the address.
1672 */
1673 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1674 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1675 (eh->ether_shost[0] == 0 &&
1676 eh->ether_shost[1] == 0 &&
1677 eh->ether_shost[2] == 0 &&
1678 eh->ether_shost[3] == 0 &&
1679 eh->ether_shost[4] == 0 &&
1680 eh->ether_shost[5] == 0) == 0) {
1681 (void) bridge_rtupdate(sc, eh->ether_shost,
1682 src_if, 0, IFBAF_DYNAMIC);
1683 }
1684
1685 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1686 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1687 m_freem(m);
1688 bridge_release_member(sc, bif, &psref);
1689 goto out;
1690 }
1691
1692 bridge_release_member(sc, bif, &psref);
1693
1694 /*
1695 * At this point, the port either doesn't participate
1696 * in spanning tree or it is in the forwarding state.
1697 */
1698
1699 /*
1700 * If the packet is unicast, destined for someone on
1701 * "this" side of the bridge, drop it.
1702 */
1703 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1704 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1705 if (src_if == dst_if) {
1706 m_freem(m);
1707 goto out;
1708 }
1709 } else {
1710 /* ...forward it to all interfaces. */
1711 sc->sc_if.if_imcasts++;
1712 dst_if = NULL;
1713 }
1714
1715 if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1716 if (m != NULL)
1717 m_freem(m);
1718 goto out;
1719 }
1720 if (m == NULL)
1721 goto out;
1722
1723 if (dst_if == NULL) {
1724 bridge_broadcast(sc, src_if, m);
1725 goto out;
1726 }
1727
1728 m_put_rcvif_psref(src_if, &psref_src);
1729 src_if = NULL;
1730
1731 /*
1732 * At this point, we're dealing with a unicast frame
1733 * going to a different interface.
1734 */
1735 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1736 m_freem(m);
1737 goto out;
1738 }
1739
1740 bif = bridge_lookup_member_if(sc, dst_if, &psref);
1741 if (bif == NULL) {
1742 /* Not a member of the bridge (anymore?) */
1743 m_freem(m);
1744 goto out;
1745 }
1746
1747 if (bif->bif_flags & IFBIF_STP) {
1748 switch (bif->bif_state) {
1749 case BSTP_IFSTATE_DISABLED:
1750 case BSTP_IFSTATE_BLOCKING:
1751 m_freem(m);
1752 bridge_release_member(sc, bif, &psref);
1753 goto out;
1754 }
1755 }
1756
1757 bridge_release_member(sc, bif, &psref);
1758
1759 ACQUIRE_GLOBAL_LOCKS();
1760 bridge_enqueue(sc, dst_if, m, 1);
1761 RELEASE_GLOBAL_LOCKS();
1762 out:
1763 if (src_if != NULL)
1764 m_put_rcvif_psref(src_if, &psref_src);
1765 return;
1766 }
1767
1768 static bool
1769 bstp_state_before_learning(struct bridge_iflist *bif)
1770 {
1771 if (bif->bif_flags & IFBIF_STP) {
1772 switch (bif->bif_state) {
1773 case BSTP_IFSTATE_BLOCKING:
1774 case BSTP_IFSTATE_LISTENING:
1775 case BSTP_IFSTATE_DISABLED:
1776 return true;
1777 }
1778 }
1779 return false;
1780 }
1781
1782 static bool
1783 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1784 {
1785 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1786
1787 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1788 #if NCARP > 0
1789 || (bif->bif_ifp->if_carp &&
1790 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1791 #endif /* NCARP > 0 */
1792 )
1793 return true;
1794
1795 return false;
1796 }
1797
1798 /*
1799 * bridge_input:
1800 *
1801 * Receive input from a member interface. Queue the packet for
1802 * bridging if it is not for us.
1803 */
1804 static void
1805 bridge_input(struct ifnet *ifp, struct mbuf *m)
1806 {
1807 struct bridge_softc *sc = ifp->if_bridge;
1808 struct bridge_iflist *bif;
1809 struct ether_header *eh;
1810 struct psref psref;
1811 int bound;
1812 DECLARE_LOCK_VARIABLE;
1813
1814 KASSERT(!cpu_intr_p());
1815
1816 if (__predict_false(sc == NULL) ||
1817 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1818 ACQUIRE_GLOBAL_LOCKS();
1819 ether_input(ifp, m);
1820 RELEASE_GLOBAL_LOCKS();
1821 return;
1822 }
1823
1824 bound = curlwp_bind();
1825 bif = bridge_lookup_member_if(sc, ifp, &psref);
1826 if (bif == NULL) {
1827 curlwp_bindx(bound);
1828 ACQUIRE_GLOBAL_LOCKS();
1829 ether_input(ifp, m);
1830 RELEASE_GLOBAL_LOCKS();
1831 return;
1832 }
1833
1834 eh = mtod(m, struct ether_header *);
1835
1836 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1837 if (memcmp(etherbroadcastaddr,
1838 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1839 m->m_flags |= M_BCAST;
1840 else
1841 m->m_flags |= M_MCAST;
1842 }
1843
1844 /*
1845 * A 'fast' path for packets addressed to interfaces that are
1846 * part of this bridge.
1847 */
1848 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1849 !bstp_state_before_learning(bif)) {
1850 struct bridge_iflist *_bif;
1851 struct ifnet *_ifp = NULL;
1852 int s;
1853 struct psref _psref;
1854
1855 BRIDGE_PSZ_RENTER(s);
1856 BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
1857 /* It is destined for us. */
1858 if (bridge_ourether(_bif, eh, 0)) {
1859 bridge_acquire_member(sc, _bif, &_psref);
1860 BRIDGE_PSZ_REXIT(s);
1861 if (_bif->bif_flags & IFBIF_LEARNING)
1862 (void) bridge_rtupdate(sc,
1863 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1864 m_set_rcvif(m, _bif->bif_ifp);
1865 _ifp = _bif->bif_ifp;
1866 bridge_release_member(sc, _bif, &_psref);
1867 goto out;
1868 }
1869
1870 /* We just received a packet that we sent out. */
1871 if (bridge_ourether(_bif, eh, 1))
1872 break;
1873 }
1874 BRIDGE_PSZ_REXIT(s);
1875 out:
1876
1877 if (_bif != NULL) {
1878 bridge_release_member(sc, bif, &psref);
1879 curlwp_bindx(bound);
1880 if (_ifp != NULL) {
1881 m->m_flags &= ~M_PROMISC;
1882 ACQUIRE_GLOBAL_LOCKS();
1883 ether_input(_ifp, m);
1884 RELEASE_GLOBAL_LOCKS();
1885 } else
1886 m_freem(m);
1887 return;
1888 }
1889 }
1890
1891 /* Tap off 802.1D packets; they do not get forwarded. */
1892 if (bif->bif_flags & IFBIF_STP &&
1893 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
1894 bstp_input(sc, bif, m);
1895 bridge_release_member(sc, bif, &psref);
1896 curlwp_bindx(bound);
1897 return;
1898 }
1899
1900 /*
1901 * A normal switch would discard the packet here, but that's not what
1902 * we've done historically. This also prevents some obnoxious behaviour.
1903 */
1904 if (bstp_state_before_learning(bif)) {
1905 bridge_release_member(sc, bif, &psref);
1906 curlwp_bindx(bound);
1907 ACQUIRE_GLOBAL_LOCKS();
1908 ether_input(ifp, m);
1909 RELEASE_GLOBAL_LOCKS();
1910 return;
1911 }
1912
1913 bridge_release_member(sc, bif, &psref);
1914
1915 bridge_forward(sc, m);
1916
1917 curlwp_bindx(bound);
1918 }
1919
1920 /*
1921 * bridge_broadcast:
1922 *
1923 * Send a frame to all interfaces that are members of
1924 * the bridge, except for the one on which the packet
1925 * arrived.
1926 */
1927 static void
1928 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
1929 struct mbuf *m)
1930 {
1931 struct bridge_iflist *bif;
1932 struct mbuf *mc;
1933 struct ifnet *dst_if;
1934 bool bmcast;
1935 int s;
1936 DECLARE_LOCK_VARIABLE;
1937
1938 bmcast = m->m_flags & (M_BCAST|M_MCAST);
1939
1940 BRIDGE_PSZ_RENTER(s);
1941 BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1942 struct psref psref;
1943
1944 bridge_acquire_member(sc, bif, &psref);
1945 BRIDGE_PSZ_REXIT(s);
1946
1947 dst_if = bif->bif_ifp;
1948
1949 if (bif->bif_flags & IFBIF_STP) {
1950 switch (bif->bif_state) {
1951 case BSTP_IFSTATE_BLOCKING:
1952 case BSTP_IFSTATE_DISABLED:
1953 goto next;
1954 }
1955 }
1956
1957 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
1958 goto next;
1959
1960 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1961 goto next;
1962
1963 if (dst_if != src_if) {
1964 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
1965 if (mc == NULL) {
1966 sc->sc_if.if_oerrors++;
1967 goto next;
1968 }
1969 ACQUIRE_GLOBAL_LOCKS();
1970 bridge_enqueue(sc, dst_if, mc, 1);
1971 RELEASE_GLOBAL_LOCKS();
1972 }
1973
1974 if (bmcast) {
1975 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
1976 if (mc == NULL) {
1977 sc->sc_if.if_oerrors++;
1978 goto next;
1979 }
1980
1981 m_set_rcvif(mc, dst_if);
1982 mc->m_flags &= ~M_PROMISC;
1983
1984 ACQUIRE_GLOBAL_LOCKS();
1985 ether_input(dst_if, mc);
1986 RELEASE_GLOBAL_LOCKS();
1987 }
1988 next:
1989 BRIDGE_PSZ_RENTER(s);
1990 bridge_release_member(sc, bif, &psref);
1991 }
1992 BRIDGE_PSZ_REXIT(s);
1993
1994 m_freem(m);
1995 }
1996
1997 static int
1998 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
1999 struct bridge_rtnode **brtp)
2000 {
2001 struct bridge_rtnode *brt;
2002 int error;
2003
2004 if (sc->sc_brtcnt >= sc->sc_brtmax)
2005 return ENOSPC;
2006
2007 /*
2008 * Allocate a new bridge forwarding node, and
2009 * initialize the expiration time and Ethernet
2010 * address.
2011 */
2012 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2013 if (brt == NULL)
2014 return ENOMEM;
2015
2016 memset(brt, 0, sizeof(*brt));
2017 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2018 brt->brt_flags = IFBAF_DYNAMIC;
2019 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2020
2021 BRIDGE_RT_LOCK(sc);
2022 error = bridge_rtnode_insert(sc, brt);
2023 BRIDGE_RT_UNLOCK(sc);
2024
2025 if (error != 0) {
2026 pool_put(&bridge_rtnode_pool, brt);
2027 return error;
2028 }
2029
2030 *brtp = brt;
2031 return 0;
2032 }
2033
2034 /*
2035 * bridge_rtupdate:
2036 *
2037 * Add a bridge routing entry.
2038 */
2039 static int
2040 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2041 struct ifnet *dst_if, int setflags, uint8_t flags)
2042 {
2043 struct bridge_rtnode *brt;
2044 int s;
2045
2046 again:
2047 /*
2048 * A route for this destination might already exist. If so,
2049 * update it, otherwise create a new one.
2050 */
2051 BRIDGE_RT_RENTER(s);
2052 brt = bridge_rtnode_lookup(sc, dst);
2053
2054 if (brt != NULL) {
2055 brt->brt_ifp = dst_if;
2056 if (setflags) {
2057 brt->brt_flags = flags;
2058 if (flags & IFBAF_STATIC)
2059 brt->brt_expire = 0;
2060 else
2061 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2062 } else {
2063 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2064 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2065 }
2066 }
2067 BRIDGE_RT_REXIT(s);
2068
2069 if (brt == NULL) {
2070 int r;
2071
2072 r = bridge_rtalloc(sc, dst, &brt);
2073 if (r != 0)
2074 return r;
2075 goto again;
2076 }
2077
2078 return 0;
2079 }
2080
2081 /*
2082 * bridge_rtlookup:
2083 *
2084 * Lookup the destination interface for an address.
2085 */
2086 static struct ifnet *
2087 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2088 {
2089 struct bridge_rtnode *brt;
2090 struct ifnet *ifs = NULL;
2091 int s;
2092
2093 BRIDGE_RT_RENTER(s);
2094 brt = bridge_rtnode_lookup(sc, addr);
2095 if (brt != NULL)
2096 ifs = brt->brt_ifp;
2097 BRIDGE_RT_REXIT(s);
2098
2099 return ifs;
2100 }
2101
2102 typedef bool (*bridge_iterate_cb_t)
2103 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
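
/*
 * A bridge_iterate_cb_t returns true to have the visited rtnode
 * removed and destroyed, and may set *need_break to true to stop the
 * iteration after the current node; see bridge_rttrim0_cb() for an
 * example.
 */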
2104
2105 /*
2106 * bridge_rtlist_iterate_remove:
2107 *
2108 * Iterate over sc->sc_rtlist and remove the rtnodes that the func
2109 * callback judges should be removed. Removals are done in the
2110 * pserialize manner; to that end, all kmem_* operations are kept
2111 * outside of mutexes.
2112 */
2113 static void
2114 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2115 {
2116 struct bridge_rtnode *brt, *nbrt;
2117 struct bridge_rtnode **brt_list;
2118 int i, count;
2119
2120 retry:
2121 count = sc->sc_brtcnt;
2122 if (count == 0)
2123 return;
2124 brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2125
2126 BRIDGE_RT_LOCK(sc);
2127 if (__predict_false(sc->sc_brtcnt > count)) {
2128 		/* The number of rtnodes increased; we need more memory */
2129 BRIDGE_RT_UNLOCK(sc);
2130 kmem_free(brt_list, sizeof(*brt_list) * count);
2131 goto retry;
2132 }
2133
2134 i = 0;
2135 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2136 bool need_break = false;
2137 if (func(sc, brt, &need_break, arg)) {
2138 bridge_rtnode_remove(sc, brt);
2139 brt_list[i++] = brt;
2140 }
2141 if (need_break)
2142 break;
2143 }
2144
2145 if (i > 0)
2146 BRIDGE_RT_PSZ_PERFORM(sc);
2147 BRIDGE_RT_UNLOCK(sc);
2148
2149 while (--i >= 0)
2150 bridge_rtnode_destroy(brt_list[i]);
2151
2152 kmem_free(brt_list, sizeof(*brt_list) * count);
2153 }
2154
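/*
 * bridge_rttrim0_cb:
 *
 *	Iterator callback for bridge_rttrim0(): remove dynamic entries
 *	until the table fits within sc_brtmax again.
 */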
2155 static bool
2156 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2157 bool *need_break, void *arg)
2158 {
2159 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2160 		/* Take the subsequent removal into account */
2161 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2162 *need_break = true;
2163 return true;
2164 } else
2165 return false;
2166 }
2167
2168 static void
2169 bridge_rttrim0(struct bridge_softc *sc)
2170 {
2171 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2172 }
2173
2174 /*
2175 * bridge_rttrim:
2176 *
2177  *	Trim the routing table so that we have a number
2178 * of routing entries less than or equal to the
2179 * maximum number.
2180 */
2181 static void
2182 bridge_rttrim(struct bridge_softc *sc)
2183 {
2184
2185 /* Make sure we actually need to do this. */
2186 if (sc->sc_brtcnt <= sc->sc_brtmax)
2187 return;
2188
2189 /* Force an aging cycle; this might trim enough addresses. */
2190 bridge_rtage(sc);
2191 if (sc->sc_brtcnt <= sc->sc_brtmax)
2192 return;
2193
2194 bridge_rttrim0(sc);
2195
2196 return;
2197 }
2198
2199 /*
2200 * bridge_timer:
2201 *
2202 * Aging timer for the bridge.
2203 */
2204 static void
2205 bridge_timer(void *arg)
2206 {
2207 struct bridge_softc *sc = arg;
2208
2209 workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2210 }
2211
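/*
 * bridge_rtage_work:
 *
 *	Workqueue handler: run an aging cycle and re-arm the aging
 *	callout while the bridge is still running.
 */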
2212 static void
2213 bridge_rtage_work(struct work *wk, void *arg)
2214 {
2215 struct bridge_softc *sc = arg;
2216
2217 KASSERT(wk == &sc->sc_rtage_wk);
2218
2219 bridge_rtage(sc);
2220
2221 if (sc->sc_if.if_flags & IFF_RUNNING)
2222 callout_reset(&sc->sc_brcallout,
2223 bridge_rtable_prune_period * hz, bridge_timer, sc);
2224 }
2225
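/*
 * bridge_rtage_cb:
 *
 *	Iterator callback for bridge_rtage(): remove dynamic entries
 *	whose expiration time has passed.
 */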
2226 static bool
2227 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2228 bool *need_break, void *arg)
2229 {
2230 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2231 time_uptime >= brt->brt_expire)
2232 return true;
2233 else
2234 return false;
2235 }
2236
2237 /*
2238 * bridge_rtage:
2239 *
2240 * Perform an aging cycle.
2241 */
2242 static void
2243 bridge_rtage(struct bridge_softc *sc)
2244 {
2245 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2246 }
2247
2248
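/*
 * bridge_rtflush_cb:
 *
 *	Iterator callback for bridge_rtflush(): remove dynamic entries,
 *	or all entries if a full flush was requested.
 */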
2249 static bool
2250 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2251 bool *need_break, void *arg)
2252 {
2253 int full = *(int*)arg;
2254
2255 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2256 return true;
2257 else
2258 return false;
2259 }
2260
2261 /*
2262 * bridge_rtflush:
2263 *
2264 * Remove all dynamic addresses from the bridge.
2265 */
2266 static void
2267 bridge_rtflush(struct bridge_softc *sc, int full)
2268 {
2269 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2270 }
2271
2272 /*
2273 * bridge_rtdaddr:
2274 *
2275 * Remove an address from the table.
2276 */
2277 static int
2278 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2279 {
2280 struct bridge_rtnode *brt;
2281
2282 BRIDGE_RT_LOCK(sc);
2283 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2284 BRIDGE_RT_UNLOCK(sc);
2285 return ENOENT;
2286 }
2287 bridge_rtnode_remove(sc, brt);
2288 BRIDGE_RT_PSZ_PERFORM(sc);
2289 BRIDGE_RT_UNLOCK(sc);
2290
2291 bridge_rtnode_destroy(brt);
2292
2293 return 0;
2294 }
2295
2296 /*
2297 * bridge_rtdelete:
2298 *
2299  *	Delete routes to a specific member interface.
2300 */
2301 static void
2302 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2303 {
2304 struct bridge_rtnode *brt;
2305
2306 BRIDGE_RT_LOCK(sc);
2307 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
2308 if (brt->brt_ifp == ifp)
2309 break;
2310 }
2311 if (brt == NULL) {
2312 BRIDGE_RT_UNLOCK(sc);
2313 return;
2314 }
2315 bridge_rtnode_remove(sc, brt);
2316 BRIDGE_RT_PSZ_PERFORM(sc);
2317 BRIDGE_RT_UNLOCK(sc);
2318
2319 bridge_rtnode_destroy(brt);
2320 }
2321
2322 /*
2323 * bridge_rtable_init:
2324 *
2325 * Initialize the route table for this bridge.
2326 */
2327 static void
2328 bridge_rtable_init(struct bridge_softc *sc)
2329 {
2330 int i;
2331
2332 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2333 KM_SLEEP);
2334
2335 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2336 LIST_INIT(&sc->sc_rthash[i]);
2337
2338 sc->sc_rthash_key = cprng_fast32();
2339
2340 LIST_INIT(&sc->sc_rtlist);
2341
2342 sc->sc_rtlist_psz = pserialize_create();
2343 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2344 }
2345
2346 /*
2347 * bridge_rtable_fini:
2348 *
2349 * Deconstruct the route table for this bridge.
2350 */
2351 static void
2352 bridge_rtable_fini(struct bridge_softc *sc)
2353 {
2354
2355 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2356 if (sc->sc_rtlist_lock)
2357 mutex_obj_free(sc->sc_rtlist_lock);
2358 if (sc->sc_rtlist_psz)
2359 pserialize_destroy(sc->sc_rtlist_psz);
2360 }
2361
2362 /*
2363 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2364  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2365 */
2366 #define mix(a, b, c) \
2367 do { \
2368 a -= b; a -= c; a ^= (c >> 13); \
2369 b -= c; b -= a; b ^= (a << 8); \
2370 c -= a; c -= b; c ^= (b >> 13); \
2371 a -= b; a -= c; a ^= (c >> 12); \
2372 b -= c; b -= a; b ^= (a << 16); \
2373 c -= a; c -= b; c ^= (b >> 5); \
2374 a -= b; a -= c; a ^= (c >> 3); \
2375 b -= c; b -= a; b ^= (a << 10); \
2376 c -= a; c -= b; c ^= (b >> 15); \
2377 } while (/*CONSTCOND*/0)
2378
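/*
 * bridge_rthash:
 *
 *	Hash an Ethernet address into the route table, mixing in the
 *	per-bridge random key (sc_rthash_key).
 */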
2379 static inline uint32_t
2380 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2381 {
2382 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2383
2384 b += addr[5] << 8;
2385 b += addr[4];
2386 a += addr[3] << 24;
2387 a += addr[2] << 16;
2388 a += addr[1] << 8;
2389 a += addr[0];
2390
2391 mix(a, b, c);
2392
2393 return (c & BRIDGE_RTHASH_MASK);
2394 }
2395
2396 #undef mix
2397
2398 /*
2399 * bridge_rtnode_lookup:
2400 *
2401 * Look up a bridge route node for the specified destination.
2402 */
2403 static struct bridge_rtnode *
2404 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2405 {
2406 struct bridge_rtnode *brt;
2407 uint32_t hash;
2408 int dir;
2409
2410 hash = bridge_rthash(sc, addr);
2411 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2412 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2413 if (dir == 0)
2414 return brt;
2415 if (dir > 0)
2416 return NULL;
2417 }
2418
2419 return NULL;
2420 }
2421
2422 /*
2423 * bridge_rtnode_insert:
2424 *
2425  *	Insert the specified bridge node into the route table.  Returns
2426  *	EEXIST if an entry for the address is already present.
2427 */
2428 static int
2429 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2430 {
2431 struct bridge_rtnode *lbrt;
2432 uint32_t hash;
2433 int dir;
2434
2435 KASSERT(BRIDGE_RT_LOCKED(sc));
2436
2437 hash = bridge_rthash(sc, brt->brt_addr);
2438
2439 lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2440 if (lbrt == NULL) {
2441 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2442 goto out;
2443 }
2444
2445 do {
2446 dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2447 if (dir == 0)
2448 return EEXIST;
2449 if (dir > 0) {
2450 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2451 goto out;
2452 }
2453 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2454 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2455 goto out;
2456 }
2457 lbrt = LIST_NEXT(lbrt, brt_hash);
2458 } while (lbrt != NULL);
2459
2460 #ifdef DIAGNOSTIC
2461 panic("bridge_rtnode_insert: impossible");
2462 #endif
2463
2464 out:
2465 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2466 sc->sc_brtcnt++;
2467
2468 return 0;
2469 }
2470
2471 /*
2472 * bridge_rtnode_remove:
2473 *
2474 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2475 */
2476 static void
2477 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2478 {
2479
2480 KASSERT(BRIDGE_RT_LOCKED(sc));
2481
2482 LIST_REMOVE(brt, brt_hash);
2483 LIST_REMOVE(brt, brt_list);
2484 sc->sc_brtcnt--;
2485 }
2486
2487 /*
2488 * bridge_rtnode_destroy:
2489 *
2490 * Destroy a bridge rtnode.
2491 */
2492 static void
2493 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2494 {
2495
2496 pool_put(&bridge_rtnode_pool, brt);
2497 }
2498
2499 #if defined(BRIDGE_IPF)
2500 extern pfil_head_t *inet_pfil_hook; /* XXX */
2501 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2502
2503 /*
2504 * Send bridge packets through IPF if they are one of the types IPF can deal
2505 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without
2506 * question.)
2507 */
2508 static int
2509 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2510 {
2511 int snap, error;
2512 struct ether_header *eh1, eh2;
2513 struct llc llc1;
2514 uint16_t ether_type;
2515
2516 snap = 0;
2517 	error = -1;	/* Default return value unless cleared to 0 below */
2518 eh1 = mtod(*mp, struct ether_header *);
2519 ether_type = ntohs(eh1->ether_type);
2520
2521 /*
2522 * Check for SNAP/LLC.
2523 */
2524 if (ether_type < ETHERMTU) {
2525 struct llc *llc2 = (struct llc *)(eh1 + 1);
2526
2527 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2528 llc2->llc_dsap == LLC_SNAP_LSAP &&
2529 llc2->llc_ssap == LLC_SNAP_LSAP &&
2530 llc2->llc_control == LLC_UI) {
2531 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2532 snap = 1;
2533 }
2534 }
2535
2536 /*
2537 * If we're trying to filter bridge traffic, don't look at anything
2538 * other than IP and ARP traffic. If the filter doesn't understand
2539 * IPv6, don't allow IPv6 through the bridge either. This is lame
2540 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2541 * but of course we don't have an AppleTalk filter to begin with.
2542 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2543 * ARP traffic.)
2544 */
2545 switch (ether_type) {
2546 case ETHERTYPE_ARP:
2547 case ETHERTYPE_REVARP:
2548 return 0; /* Automatically pass */
2549 case ETHERTYPE_IP:
2550 # ifdef INET6
2551 case ETHERTYPE_IPV6:
2552 # endif /* INET6 */
2553 break;
2554 default:
2555 goto bad;
2556 }
2557
2558 /* Strip off the Ethernet header and keep a copy. */
2559 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2560 m_adj(*mp, ETHER_HDR_LEN);
2561
2562 /* Strip off snap header, if present */
2563 if (snap) {
2564 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2565 m_adj(*mp, sizeof(struct llc));
2566 }
2567
2568 /*
2569 * Check basic packet sanity and run IPF through pfil.
2570 */
2571 KASSERT(!cpu_intr_p());
2572 switch (ether_type)
2573 {
2574 case ETHERTYPE_IP :
2575 error = bridge_ip_checkbasic(mp);
2576 if (error == 0)
2577 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2578 break;
2579 # ifdef INET6
2580 case ETHERTYPE_IPV6 :
2581 error = bridge_ip6_checkbasic(mp);
2582 if (error == 0)
2583 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2584 break;
2585 # endif
2586 default :
2587 error = 0;
2588 break;
2589 }
2590
2591 if (*mp == NULL)
2592 return error;
2593 if (error != 0)
2594 goto bad;
2595
2596 error = -1;
2597
2598 /*
2599 * Finally, put everything back the way it was and return
2600 */
2601 if (snap) {
2602 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2603 if (*mp == NULL)
2604 return error;
2605 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2606 }
2607
2608 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2609 if (*mp == NULL)
2610 return error;
2611 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2612
2613 return 0;
2614
2615 bad:
2616 m_freem(*mp);
2617 *mp = NULL;
2618 return error;
2619 }
2620
2621 /*
2622 * Perform basic checks on header size since
2623  * IPF assumes ip_input() has already done
2624  * this for it. Cut-and-pasted from ip_input.c.
2625 * Given how simple the IPv6 version is,
2626 * does the IPv4 version really need to be
2627 * this complicated?
2628 *
2629 * XXX Should we update ipstat here, or not?
2630 * XXX Right now we update ipstat but not
2631 * XXX csum_counter.
2632 */
2633 static int
2634 bridge_ip_checkbasic(struct mbuf **mp)
2635 {
2636 struct mbuf *m = *mp;
2637 struct ip *ip;
2638 int len, hlen;
2639
2640 if (*mp == NULL)
2641 return -1;
2642
2643 if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2644 if ((m = m_copyup(m, sizeof(struct ip),
2645 (max_linkhdr + 3) & ~3)) == NULL) {
2646 /* XXXJRT new stat, please */
2647 ip_statinc(IP_STAT_TOOSMALL);
2648 goto bad;
2649 }
2650 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
2651 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2652 ip_statinc(IP_STAT_TOOSMALL);
2653 goto bad;
2654 }
2655 }
2656 ip = mtod(m, struct ip *);
2657 if (ip == NULL) goto bad;
2658
2659 if (ip->ip_v != IPVERSION) {
2660 ip_statinc(IP_STAT_BADVERS);
2661 goto bad;
2662 }
2663 hlen = ip->ip_hl << 2;
2664 if (hlen < sizeof(struct ip)) { /* minimum header length */
2665 ip_statinc(IP_STAT_BADHLEN);
2666 goto bad;
2667 }
2668 if (hlen > m->m_len) {
2669 		if ((m = m_pullup(m, hlen)) == NULL) {
2670 ip_statinc(IP_STAT_BADHLEN);
2671 goto bad;
2672 }
2673 ip = mtod(m, struct ip *);
2674 if (ip == NULL) goto bad;
2675 }
2676
2677 switch (m->m_pkthdr.csum_flags &
2678 ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2679 M_CSUM_IPv4_BAD)) {
2680 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2681 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2682 goto bad;
2683
2684 case M_CSUM_IPv4:
2685 /* Checksum was okay. */
2686 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2687 break;
2688
2689 default:
2690 /* Must compute it ourselves. */
2691 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2692 if (in_cksum(m, hlen) != 0)
2693 goto bad;
2694 break;
2695 }
2696
2697 /* Retrieve the packet length. */
2698 len = ntohs(ip->ip_len);
2699
2700 /*
2701 * Check for additional length bogosity
2702 */
2703 if (len < hlen) {
2704 ip_statinc(IP_STAT_BADLEN);
2705 goto bad;
2706 }
2707
2708 /*
2709 * Check that the amount of data in the buffers
2710  * is at least as much as the IP header would have us expect.
2711 * Drop packet if shorter than we expect.
2712 */
2713 if (m->m_pkthdr.len < len) {
2714 ip_statinc(IP_STAT_TOOSHORT);
2715 goto bad;
2716 }
2717
2718 /* Checks out, proceed */
2719 *mp = m;
2720 return 0;
2721
2722 bad:
2723 *mp = m;
2724 return -1;
2725 }
2726
2727 # ifdef INET6
2728 /*
2729 * Same as above, but for IPv6.
2730 * Cut-and-pasted from ip6_input.c.
2731 * XXX Should we update ip6stat, or not?
2732 */
2733 static int
2734 bridge_ip6_checkbasic(struct mbuf **mp)
2735 {
2736 struct mbuf *m = *mp;
2737 struct ip6_hdr *ip6;
2738
2739 /*
2740 * If the IPv6 header is not aligned, slurp it up into a new
2741 * mbuf with space for link headers, in the event we forward
2742 * it. Otherwise, if it is aligned, make sure the entire base
2743 * IPv6 header is in the first mbuf of the chain.
2744 */
2745 if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2746 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2747 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2748 (max_linkhdr + 3) & ~3)) == NULL) {
2749 /* XXXJRT new stat, please */
2750 ip6_statinc(IP6_STAT_TOOSMALL);
2751 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2752 goto bad;
2753 }
2754 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2755 struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2756 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2757 ip6_statinc(IP6_STAT_TOOSMALL);
2758 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2759 goto bad;
2760 }
2761 }
2762
2763 ip6 = mtod(m, struct ip6_hdr *);
2764
2765 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2766 ip6_statinc(IP6_STAT_BADVERS);
2767 in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2768 goto bad;
2769 }
2770
2771 /* Checks out, proceed */
2772 *mp = m;
2773 return 0;
2774
2775 bad:
2776 *mp = m;
2777 return -1;
2778 }
2779 # endif /* INET6 */
2780 #endif /* BRIDGE_IPF */
2781