1 /*	$NetBSD: if_bridge.c,v 1.91.2.2 2015/06/06 14:40:25 skrll Exp $	*/
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79 * consider heterogeneous bridges).
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.91.2.2 2015/06/06 14:40:25 skrll Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #endif /* _KERNEL_OPT */
89
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105
106 #include <net/bpf.h>
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_llc.h>
111 #include <net/pktqueue.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115
116 #if defined(BRIDGE_IPF)
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123
124 #include <netinet/ip6.h>
125 #include <netinet6/in6_var.h>
126 #include <netinet6/ip6_var.h>
127 #include <netinet6/ip6_private.h> /* XXX */
128 #endif /* BRIDGE_IPF */
129
130 /*
131 * Size of the route hash table. Must be a power of two.
132 */
133 #ifndef BRIDGE_RTHASH_SIZE
134 #define BRIDGE_RTHASH_SIZE 1024
135 #endif
136
137 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
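
/*
 * Illustrative note (not part of the original code): because
 * BRIDGE_RTHASH_SIZE is a power of two, a hash value can be folded
 * into a bucket index with the mask instead of a modulo, e.g.
 *
 *	bucket = &sc->sc_rthash[hash & BRIDGE_RTHASH_MASK];
 *
 * which is equivalent to "hash % BRIDGE_RTHASH_SIZE" for power-of-two
 * sizes.  Here "hash" is a placeholder and sc_rthash is assumed to be
 * the bucket array set up by bridge_rtable_init().
 */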
138
139 #include "carp.h"
140 #if NCARP > 0
141 #include <netinet/in.h>
142 #include <netinet/in_var.h>
143 #include <netinet/ip_carp.h>
144 #endif
145
146 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
148 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
149
150 /*
151 * Maximum number of addresses to cache.
152 */
153 #ifndef BRIDGE_RTABLE_MAX
154 #define BRIDGE_RTABLE_MAX 100
155 #endif
156
157 /*
158 * Spanning tree defaults.
159 */
160 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
161 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
162 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
163 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
164 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
165 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
166 #define BSTP_DEFAULT_PATH_COST 55
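
/*
 * Illustrative note: the STP timer defaults above are expressed in
 * units of 1/256 of a second, hence the "* 256".  The BRDGSHT/BRDGGHT
 * style ioctls further down convert between those units and whole
 * seconds with shifts, roughly:
 *
 *	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;	set
 *	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;	get
 */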
167
168 /*
169 * Timeout (in seconds) for entries learned dynamically.
170 */
171 #ifndef BRIDGE_RTABLE_TIMEOUT
172 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
173 #endif
174
175 /*
176 * Number of seconds between walks of the route list.
177 */
178 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
179 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
180 #endif
181
182 #define BRIDGE_RT_INTR_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_intr_lock)
183 #define BRIDGE_RT_INTR_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_intr_lock)
184 #define BRIDGE_RT_INTR_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_intr_lock)
185
186 #define BRIDGE_RT_LOCK(_sc) if ((_sc)->sc_rtlist_lock) \
187 mutex_enter((_sc)->sc_rtlist_lock)
188 #define BRIDGE_RT_UNLOCK(_sc) if ((_sc)->sc_rtlist_lock) \
189 mutex_exit((_sc)->sc_rtlist_lock)
190 #define BRIDGE_RT_LOCKED(_sc) (!(_sc)->sc_rtlist_lock || \
191 mutex_owned((_sc)->sc_rtlist_lock))
192
193 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
194 if ((_sc)->sc_rtlist_psz != NULL) \
195 pserialize_perform((_sc)->sc_rtlist_psz);
196
197 #ifdef BRIDGE_MPSAFE
198 #define BRIDGE_RT_RENTER(__s) do { \
199 if (!cpu_intr_p()) \
200 __s = pserialize_read_enter(); \
201 else \
202 __s = splhigh(); \
203 } while (0)
204 #define BRIDGE_RT_REXIT(__s) do { \
205 if (!cpu_intr_p()) \
206 pserialize_read_exit(__s); \
207 else \
208 splx(__s); \
209 } while (0)
210 #else /* BRIDGE_MPSAFE */
211 #define BRIDGE_RT_RENTER(__s) do { __s = 0; } while (0)
212 #define BRIDGE_RT_REXIT(__s) do { (void)__s; } while (0)
213 #endif /* BRIDGE_MPSAFE */
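
/*
 * Sketch of the intended read-side use of BRIDGE_RT_RENTER/REXIT
 * (illustrative only; bridge_rtlookup() below is the real example):
 *
 *	int s;
 *
 *	BRIDGE_RT_RENTER(s);
 *	brt = bridge_rtnode_lookup(sc, addr);
 *	if (brt != NULL)
 *		... read brt fields, do not sleep ...
 *	BRIDGE_RT_REXIT(s);
 *
 * Under BRIDGE_MPSAFE this is a pserialize read section (or a raised
 * SPL when called from interrupt context); otherwise it degenerates
 * to a no-op.
 */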
214
215 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
216
217 static struct pool bridge_rtnode_pool;
218 static struct work bridge_rtage_wk;
219
220 void bridgeattach(int);
221
222 static int bridge_clone_create(struct if_clone *, int);
223 static int bridge_clone_destroy(struct ifnet *);
224
225 static int bridge_ioctl(struct ifnet *, u_long, void *);
226 static int bridge_init(struct ifnet *);
227 static void bridge_stop(struct ifnet *, int);
228 static void bridge_start(struct ifnet *);
229
230 static void bridge_input(struct ifnet *, struct mbuf *);
231 static void bridge_forward(void *);
232
233 static void bridge_timer(void *);
234
235 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
236 struct mbuf *);
237
238 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
239 struct ifnet *, int, uint8_t);
240 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
241 static void bridge_rttrim(struct bridge_softc *);
242 static void bridge_rtage(struct bridge_softc *);
243 static void bridge_rtage_work(struct work *, void *);
244 static void bridge_rtflush(struct bridge_softc *, int);
245 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
246 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
247
248 static void bridge_rtable_init(struct bridge_softc *);
249 static void bridge_rtable_fini(struct bridge_softc *);
250
251 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
252 const uint8_t *);
253 static int bridge_rtnode_insert(struct bridge_softc *,
254 struct bridge_rtnode *);
255 static void bridge_rtnode_remove(struct bridge_softc *,
256 struct bridge_rtnode *);
257 static void bridge_rtnode_destroy(struct bridge_rtnode *);
258
259 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
260 const char *name);
261 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
262 struct ifnet *ifp);
263 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *);
264 static void bridge_delete_member(struct bridge_softc *,
265 struct bridge_iflist *);
266 static struct bridge_iflist *bridge_try_hold_bif(struct bridge_iflist *);
267
268 static int bridge_ioctl_add(struct bridge_softc *, void *);
269 static int bridge_ioctl_del(struct bridge_softc *, void *);
270 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
271 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
272 static int bridge_ioctl_scache(struct bridge_softc *, void *);
273 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
274 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
275 static int bridge_ioctl_rts(struct bridge_softc *, void *);
276 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
277 static int bridge_ioctl_sto(struct bridge_softc *, void *);
278 static int bridge_ioctl_gto(struct bridge_softc *, void *);
279 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
280 static int bridge_ioctl_flush(struct bridge_softc *, void *);
281 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
282 static int bridge_ioctl_spri(struct bridge_softc *, void *);
283 static int bridge_ioctl_ght(struct bridge_softc *, void *);
284 static int bridge_ioctl_sht(struct bridge_softc *, void *);
285 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
286 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
287 static int bridge_ioctl_gma(struct bridge_softc *, void *);
288 static int bridge_ioctl_sma(struct bridge_softc *, void *);
289 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
290 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
291 #if defined(BRIDGE_IPF)
292 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
293 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
294 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
295 static int bridge_ip_checkbasic(struct mbuf **mp);
296 # ifdef INET6
297 static int bridge_ip6_checkbasic(struct mbuf **mp);
298 # endif /* INET6 */
299 #endif /* BRIDGE_IPF */
300
301 static void bridge_sysctl_fwdq_setup(struct sysctllog **clog,
302 struct bridge_softc *sc);
303
304 struct bridge_control {
305 int (*bc_func)(struct bridge_softc *, void *);
306 int bc_argsize;
307 int bc_flags;
308 };
309
310 #define BC_F_COPYIN 0x01 /* copy arguments in */
311 #define BC_F_COPYOUT 0x02 /* copy arguments out */
312 #define BC_F_SUSER 0x04 /* do super-user check */
313 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
314 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
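
/*
 * Illustrative note: a bridge_control_table entry ties these flags to
 * a handler and an argument size.  For example, the existing BRDGADD
 * entry below,
 *
 *	[BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq),
 *	    BC_F_COPYIN|BC_F_SUSER},
 *
 * makes bridge_ioctl() copy a struct ifbreq in from userland
 * (BC_F_COPYIN), perform the privilege check (BC_F_SUSER) and then
 * call bridge_ioctl_add() with the kernel copy of the arguments.
 */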
315
316 static const struct bridge_control bridge_control_table[] = {
317 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
318 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
319
320 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
321 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
322
323 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
324 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
325
326 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
327 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
328
329 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
330
331 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
332 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
333
334 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
335
336 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
337
338 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
339 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
340
341 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
342 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343
344 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
345 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
346
347 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
348 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
349
350 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
351
352 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
353 #if defined(BRIDGE_IPF)
354 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
355 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
356 #endif /* BRIDGE_IPF */
357 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
358 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
359 };
360
361 static const int bridge_control_table_size = __arraycount(bridge_control_table);
362
363 static LIST_HEAD(, bridge_softc) bridge_list;
364 static kmutex_t bridge_list_lock;
365
366 static struct if_clone bridge_cloner =
367 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
368
369 /*
370 * bridgeattach:
371 *
372 * Pseudo-device attach routine.
373 */
374 void
375 bridgeattach(int n)
376 {
377
378 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
379 0, 0, 0, "brtpl", NULL, IPL_NET);
380
381 LIST_INIT(&bridge_list);
382 mutex_init(&bridge_list_lock, MUTEX_DEFAULT, IPL_NET);
383 if_clone_attach(&bridge_cloner);
384 }
385
386 /*
387 * bridge_clone_create:
388 *
389 * Create a new bridge instance.
390 */
391 static int
392 bridge_clone_create(struct if_clone *ifc, int unit)
393 {
394 struct bridge_softc *sc;
395 struct ifnet *ifp;
396 int error, flags;
397
398 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
399 ifp = &sc->sc_if;
400
401 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
402 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
403 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
404 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
405 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
406 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
407 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
408 sc->sc_filter_flags = 0;
409
410 /* Initialize our routing table. */
411 bridge_rtable_init(sc);
412
413 #ifdef BRIDGE_MPSAFE
414 flags = WQ_MPSAFE;
415 #else
416 flags = 0;
417 #endif
418 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
419 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, flags);
420 if (error)
421 panic("%s: workqueue_create %d\n", __func__, error);
422
423 callout_init(&sc->sc_brcallout, 0);
424 callout_init(&sc->sc_bstpcallout, 0);
425
426 LIST_INIT(&sc->sc_iflist);
427 #ifdef BRIDGE_MPSAFE
428 sc->sc_iflist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
429 sc->sc_iflist_psz = pserialize_create();
430 sc->sc_iflist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
431 #else
432 sc->sc_iflist_intr_lock = NULL;
433 sc->sc_iflist_psz = NULL;
434 sc->sc_iflist_lock = NULL;
435 #endif
436 cv_init(&sc->sc_iflist_cv, "if_bridge_cv");
437
438 if_initname(ifp, ifc->ifc_name, unit);
439 ifp->if_softc = sc;
440 ifp->if_mtu = ETHERMTU;
441 ifp->if_ioctl = bridge_ioctl;
442 ifp->if_output = bridge_output;
443 ifp->if_start = bridge_start;
444 ifp->if_stop = bridge_stop;
445 ifp->if_init = bridge_init;
446 ifp->if_type = IFT_BRIDGE;
447 ifp->if_addrlen = 0;
448 ifp->if_dlt = DLT_EN10MB;
449 ifp->if_hdrlen = ETHER_HDR_LEN;
450
451 sc->sc_fwd_pktq = pktq_create(IFQ_MAXLEN, bridge_forward, sc);
452 KASSERT(sc->sc_fwd_pktq != NULL);
453
454 bridge_sysctl_fwdq_setup(&ifp->if_sysctl_log, sc);
455
456 if_attach(ifp);
457
458 if_alloc_sadl(ifp);
459
460 mutex_enter(&bridge_list_lock);
461 LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
462 mutex_exit(&bridge_list_lock);
463
464 return (0);
465 }
466
467 /*
468 * bridge_clone_destroy:
469 *
470 * Destroy a bridge instance.
471 */
472 static int
473 bridge_clone_destroy(struct ifnet *ifp)
474 {
475 struct bridge_softc *sc = ifp->if_softc;
476 struct bridge_iflist *bif;
477 int s;
478
479 	/* Must be called while IFF_RUNNING is set, i.e., before bridge_stop */
480 pktq_barrier(sc->sc_fwd_pktq);
481
482 s = splnet();
483
484 bridge_stop(ifp, 1);
485
486 BRIDGE_LOCK(sc);
487 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
488 bridge_delete_member(sc, bif);
489 BRIDGE_UNLOCK(sc);
490
491 mutex_enter(&bridge_list_lock);
492 LIST_REMOVE(sc, sc_list);
493 mutex_exit(&bridge_list_lock);
494
495 splx(s);
496
497 if_detach(ifp);
498
499 	/* Should be called after if_detach to be safe */
500 pktq_flush(sc->sc_fwd_pktq);
501 pktq_destroy(sc->sc_fwd_pktq);
502
503 /* Tear down the routing table. */
504 bridge_rtable_fini(sc);
505
506 cv_destroy(&sc->sc_iflist_cv);
507 if (sc->sc_iflist_intr_lock)
508 mutex_obj_free(sc->sc_iflist_intr_lock);
509
510 if (sc->sc_iflist_psz)
511 pserialize_destroy(sc->sc_iflist_psz);
512 if (sc->sc_iflist_lock)
513 mutex_obj_free(sc->sc_iflist_lock);
514
515 workqueue_destroy(sc->sc_rtage_wq);
516
517 kmem_free(sc, sizeof(*sc));
518
519 return (0);
520 }
521
522 static int
523 bridge_sysctl_fwdq_maxlen(SYSCTLFN_ARGS)
524 {
525 struct sysctlnode node = *rnode;
526 const struct bridge_softc *sc = node.sysctl_data;
527 return sysctl_pktq_maxlen(SYSCTLFN_CALL(rnode), sc->sc_fwd_pktq);
528 }
529
530 #define SYSCTL_BRIDGE_PKTQ(cn, c) \
531 static int \
532 bridge_sysctl_fwdq_##cn(SYSCTLFN_ARGS) \
533 { \
534 struct sysctlnode node = *rnode; \
535 const struct bridge_softc *sc = node.sysctl_data; \
536 return sysctl_pktq_count(SYSCTLFN_CALL(rnode), \
537 sc->sc_fwd_pktq, c); \
538 }
539
540 SYSCTL_BRIDGE_PKTQ(items, PKTQ_NITEMS)
541 SYSCTL_BRIDGE_PKTQ(drops, PKTQ_DROPS)
542
543 static void
544 bridge_sysctl_fwdq_setup(struct sysctllog **clog, struct bridge_softc *sc)
545 {
546 const struct sysctlnode *cnode, *rnode;
547 sysctlfn len_func = NULL, maxlen_func = NULL, drops_func = NULL;
548 const char *ifname = sc->sc_if.if_xname;
549
550 len_func = bridge_sysctl_fwdq_items;
551 maxlen_func = bridge_sysctl_fwdq_maxlen;
552 drops_func = bridge_sysctl_fwdq_drops;
553
554 if (sysctl_createv(clog, 0, NULL, &rnode,
555 CTLFLAG_PERMANENT,
556 CTLTYPE_NODE, "interfaces",
557 SYSCTL_DESCR("Per-interface controls"),
558 NULL, 0, NULL, 0,
559 CTL_NET, CTL_CREATE, CTL_EOL) != 0)
560 goto bad;
561
562 if (sysctl_createv(clog, 0, &rnode, &rnode,
563 CTLFLAG_PERMANENT,
564 CTLTYPE_NODE, ifname,
565 SYSCTL_DESCR("Interface controls"),
566 NULL, 0, NULL, 0,
567 CTL_CREATE, CTL_EOL) != 0)
568 goto bad;
569
570 if (sysctl_createv(clog, 0, &rnode, &rnode,
571 CTLFLAG_PERMANENT,
572 CTLTYPE_NODE, "fwdq",
573 SYSCTL_DESCR("Protocol input queue controls"),
574 NULL, 0, NULL, 0,
575 CTL_CREATE, CTL_EOL) != 0)
576 goto bad;
577
578 if (sysctl_createv(clog, 0, &rnode, &cnode,
579 CTLFLAG_PERMANENT,
580 CTLTYPE_INT, "len",
581 SYSCTL_DESCR("Current forwarding queue length"),
582 len_func, 0, (void *)sc, 0,
583 CTL_CREATE, IFQCTL_LEN, CTL_EOL) != 0)
584 goto bad;
585
586 if (sysctl_createv(clog, 0, &rnode, &cnode,
587 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
588 CTLTYPE_INT, "maxlen",
589 SYSCTL_DESCR("Maximum allowed forwarding queue length"),
590 maxlen_func, 0, (void *)sc, 0,
591 CTL_CREATE, IFQCTL_MAXLEN, CTL_EOL) != 0)
592 goto bad;
593
594 if (sysctl_createv(clog, 0, &rnode, &cnode,
595 CTLFLAG_PERMANENT,
596 CTLTYPE_INT, "drops",
597 SYSCTL_DESCR("Packets dropped due to full forwarding queue"),
598 drops_func, 0, (void *)sc, 0,
599 CTL_CREATE, IFQCTL_DROPS, CTL_EOL) != 0)
600 goto bad;
601
602 return;
603 bad:
604 aprint_error("%s: could not attach sysctl nodes\n", ifname);
605 return;
606 }
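
/*
 * Usage note (illustrative): for a bridge named bridge0 the nodes
 * created above would typically be reached from userland as
 *
 *	sysctl net.interfaces.bridge0.fwdq.len
 *	sysctl -w net.interfaces.bridge0.fwdq.maxlen=512
 *
 * "bridge0" and the value 512 are examples only; the numeric MIB
 * entries are assigned dynamically via CTL_CREATE.
 */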
607
608 /*
609 * bridge_ioctl:
610 *
611 * Handle a control request from the operator.
612 */
613 static int
614 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
615 {
616 struct bridge_softc *sc = ifp->if_softc;
617 struct lwp *l = curlwp; /* XXX */
618 union {
619 struct ifbreq ifbreq;
620 struct ifbifconf ifbifconf;
621 struct ifbareq ifbareq;
622 struct ifbaconf ifbaconf;
623 struct ifbrparam ifbrparam;
624 } args;
625 struct ifdrv *ifd = (struct ifdrv *) data;
626 const struct bridge_control *bc = NULL; /* XXXGCC */
627 int s, error = 0;
628
629 /* Authorize command before calling splnet(). */
630 switch (cmd) {
631 case SIOCGDRVSPEC:
632 case SIOCSDRVSPEC:
633 if (ifd->ifd_cmd >= bridge_control_table_size
634 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
635 error = EINVAL;
636 return error;
637 }
638
639 /* We only care about BC_F_SUSER at this point. */
640 if ((bc->bc_flags & BC_F_SUSER) == 0)
641 break;
642
643 error = kauth_authorize_network(l->l_cred,
644 KAUTH_NETWORK_INTERFACE_BRIDGE,
645 cmd == SIOCGDRVSPEC ?
646 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
647 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
648 ifd, NULL, NULL);
649 if (error)
650 return (error);
651
652 break;
653 }
654
655 s = splnet();
656
657 switch (cmd) {
658 case SIOCGDRVSPEC:
659 case SIOCSDRVSPEC:
660 KASSERT(bc != NULL);
661 if (cmd == SIOCGDRVSPEC &&
662 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
663 error = EINVAL;
664 break;
665 }
666 else if (cmd == SIOCSDRVSPEC &&
667 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
668 error = EINVAL;
669 break;
670 }
671
672 /* BC_F_SUSER is checked above, before splnet(). */
673
674 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
675 && (ifd->ifd_len != bc->bc_argsize
676 || ifd->ifd_len > sizeof(args))) {
677 error = EINVAL;
678 break;
679 }
680
681 memset(&args, 0, sizeof(args));
682 if (bc->bc_flags & BC_F_COPYIN) {
683 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
684 if (error)
685 break;
686 } else if (bc->bc_flags & BC_F_XLATEIN) {
687 args.ifbifconf.ifbic_len = ifd->ifd_len;
688 args.ifbifconf.ifbic_buf = ifd->ifd_data;
689 }
690
691 error = (*bc->bc_func)(sc, &args);
692 if (error)
693 break;
694
695 if (bc->bc_flags & BC_F_COPYOUT) {
696 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
697 } else if (bc->bc_flags & BC_F_XLATEOUT) {
698 ifd->ifd_len = args.ifbifconf.ifbic_len;
699 ifd->ifd_data = args.ifbifconf.ifbic_buf;
700 }
701 break;
702
703 case SIOCSIFFLAGS:
704 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
705 break;
706 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
707 case IFF_RUNNING:
708 /*
709 * If interface is marked down and it is running,
710 * then stop and disable it.
711 */
712 (*ifp->if_stop)(ifp, 1);
713 break;
714 case IFF_UP:
715 /*
716 * If interface is marked up and it is stopped, then
717 * start it.
718 */
719 error = (*ifp->if_init)(ifp);
720 break;
721 default:
722 break;
723 }
724 break;
725
726 case SIOCSIFMTU:
727 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
728 error = 0;
729 break;
730
731 default:
732 error = ifioctl_common(ifp, cmd, data);
733 break;
734 }
735
736 splx(s);
737
738 return (error);
739 }
740
741 /*
742 * bridge_lookup_member:
743 *
744 * Lookup a bridge member interface.
745 */
746 static struct bridge_iflist *
747 bridge_lookup_member(struct bridge_softc *sc, const char *name)
748 {
749 struct bridge_iflist *bif;
750 struct ifnet *ifp;
751 int s;
752
753 BRIDGE_PSZ_RENTER(s);
754
755 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
756 ifp = bif->bif_ifp;
757 if (strcmp(ifp->if_xname, name) == 0)
758 break;
759 }
760 bif = bridge_try_hold_bif(bif);
761
762 BRIDGE_PSZ_REXIT(s);
763
764 return bif;
765 }
766
767 /*
768 * bridge_lookup_member_if:
769 *
770 * Lookup a bridge member interface by ifnet*.
771 */
772 static struct bridge_iflist *
773 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
774 {
775 struct bridge_iflist *bif;
776 int s;
777
778 BRIDGE_PSZ_RENTER(s);
779
780 bif = member_ifp->if_bridgeif;
781 bif = bridge_try_hold_bif(bif);
782
783 BRIDGE_PSZ_REXIT(s);
784
785 return bif;
786 }
787
788 static struct bridge_iflist *
789 bridge_try_hold_bif(struct bridge_iflist *bif)
790 {
791 #ifdef BRIDGE_MPSAFE
792 if (bif != NULL) {
793 if (bif->bif_waiting)
794 bif = NULL;
795 else
796 atomic_inc_32(&bif->bif_refs);
797 }
798 #endif
799 return bif;
800 }
801
802 /*
803 * bridge_release_member:
804 *
805 * Release the specified member interface.
806 */
807 static void
808 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif)
809 {
810 #ifdef BRIDGE_MPSAFE
811 uint32_t refs;
812
813 refs = atomic_dec_uint_nv(&bif->bif_refs);
814 if (__predict_false(refs == 0 && bif->bif_waiting)) {
815 BRIDGE_INTR_LOCK(sc);
816 cv_broadcast(&sc->sc_iflist_cv);
817 BRIDGE_INTR_UNLOCK(sc);
818 }
819 #else
820 (void)sc;
821 (void)bif;
822 #endif
823 }
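
/*
 * Usage sketch for the lookup/release pair above (this is the pattern
 * the ioctl handlers below follow):
 *
 *	bif = bridge_lookup_member(sc, name);
 *	if (bif == NULL)
 *		return ENOENT;
 *	... use bif->bif_ifp, bif->bif_flags, ...
 *	bridge_release_member(sc, bif);
 *
 * Under BRIDGE_MPSAFE the lookup takes a reference (bif_refs) that the
 * release drops, waking bridge_delete_member() if it is waiting for
 * the member to go idle.
 */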
824
825 /*
826 * bridge_delete_member:
827 *
828 * Delete the specified member interface.
829 */
830 static void
831 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
832 {
833 struct ifnet *ifs = bif->bif_ifp;
834
835 KASSERT(BRIDGE_LOCKED(sc));
836
837 ifs->if_input = ether_input;
838 ifs->if_bridge = NULL;
839 ifs->if_bridgeif = NULL;
840
841 LIST_REMOVE(bif, bif_next);
842
843 BRIDGE_PSZ_PERFORM(sc);
844
845 BRIDGE_UNLOCK(sc);
846
847 #ifdef BRIDGE_MPSAFE
848 BRIDGE_INTR_LOCK(sc);
849 bif->bif_waiting = true;
850 membar_sync();
851 while (bif->bif_refs > 0) {
852 aprint_debug("%s: cv_wait on iflist\n", __func__);
853 cv_wait(&sc->sc_iflist_cv, sc->sc_iflist_intr_lock);
854 }
855 bif->bif_waiting = false;
856 BRIDGE_INTR_UNLOCK(sc);
857 #endif
858
859 kmem_free(bif, sizeof(*bif));
860
861 BRIDGE_LOCK(sc);
862 }
863
864 static int
865 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
866 {
867 struct ifbreq *req = arg;
868 struct bridge_iflist *bif = NULL;
869 struct ifnet *ifs;
870 int error = 0;
871
872 ifs = ifunit(req->ifbr_ifsname);
873 if (ifs == NULL)
874 return (ENOENT);
875
876 if (sc->sc_if.if_mtu != ifs->if_mtu)
877 return (EINVAL);
878
879 if (ifs->if_bridge == sc)
880 return (EEXIST);
881
882 if (ifs->if_bridge != NULL)
883 return (EBUSY);
884
885 if (ifs->if_input != ether_input)
886 return EINVAL;
887
888 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
889 if ((ifs->if_flags & IFF_SIMPLEX) == 0)
890 return EINVAL;
891
892 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
893
894 switch (ifs->if_type) {
895 case IFT_ETHER:
896 /*
897 * Place the interface into promiscuous mode.
898 */
899 error = ifpromisc(ifs, 1);
900 if (error)
901 goto out;
902 break;
903 default:
904 error = EINVAL;
905 goto out;
906 }
907
908 bif->bif_ifp = ifs;
909 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
910 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
911 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
912 bif->bif_refs = 0;
913 bif->bif_waiting = false;
914
915 BRIDGE_LOCK(sc);
916
917 ifs->if_bridge = sc;
918 ifs->if_bridgeif = bif;
919 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
920 ifs->if_input = bridge_input;
921
922 BRIDGE_UNLOCK(sc);
923
924 if (sc->sc_if.if_flags & IFF_RUNNING)
925 bstp_initialization(sc);
926 else
927 bstp_stop(sc);
928
929 out:
930 if (error) {
931 if (bif != NULL)
932 kmem_free(bif, sizeof(*bif));
933 }
934 return (error);
935 }
936
937 static int
938 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
939 {
940 struct ifbreq *req = arg;
941 const char *name = req->ifbr_ifsname;
942 struct bridge_iflist *bif;
943 struct ifnet *ifs;
944
945 BRIDGE_LOCK(sc);
946
947 /*
948 * Don't use bridge_lookup_member. We want to get a member
949 * with bif_refs == 0.
950 */
951 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
952 ifs = bif->bif_ifp;
953 if (strcmp(ifs->if_xname, name) == 0)
954 break;
955 }
956
957 if (bif == NULL) {
958 BRIDGE_UNLOCK(sc);
959 return ENOENT;
960 }
961
962 bridge_delete_member(sc, bif);
963
964 BRIDGE_UNLOCK(sc);
965
966 switch (ifs->if_type) {
967 case IFT_ETHER:
968 /*
969 * Take the interface out of promiscuous mode.
970 		 * Don't call it while holding a spin lock.
971 */
972 (void) ifpromisc(ifs, 0);
973 break;
974 default:
975 #ifdef DIAGNOSTIC
976 panic("bridge_delete_member: impossible");
977 #endif
978 break;
979 }
980
981 bridge_rtdelete(sc, ifs);
982
983 if (sc->sc_if.if_flags & IFF_RUNNING)
984 bstp_initialization(sc);
985
986 return 0;
987 }
988
989 static int
990 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
991 {
992 struct ifbreq *req = arg;
993 struct bridge_iflist *bif;
994
995 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
996 if (bif == NULL)
997 return (ENOENT);
998
999 req->ifbr_ifsflags = bif->bif_flags;
1000 req->ifbr_state = bif->bif_state;
1001 req->ifbr_priority = bif->bif_priority;
1002 req->ifbr_path_cost = bif->bif_path_cost;
1003 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1004
1005 bridge_release_member(sc, bif);
1006
1007 return (0);
1008 }
1009
1010 static int
1011 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1012 {
1013 struct ifbreq *req = arg;
1014 struct bridge_iflist *bif;
1015
1016 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1017 if (bif == NULL)
1018 return (ENOENT);
1019
1020 if (req->ifbr_ifsflags & IFBIF_STP) {
1021 switch (bif->bif_ifp->if_type) {
1022 case IFT_ETHER:
1023 /* These can do spanning tree. */
1024 break;
1025
1026 default:
1027 /* Nothing else can. */
1028 bridge_release_member(sc, bif);
1029 return (EINVAL);
1030 }
1031 }
1032
1033 bif->bif_flags = req->ifbr_ifsflags;
1034
1035 bridge_release_member(sc, bif);
1036
1037 if (sc->sc_if.if_flags & IFF_RUNNING)
1038 bstp_initialization(sc);
1039
1040 return (0);
1041 }
1042
1043 static int
1044 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1045 {
1046 struct ifbrparam *param = arg;
1047
1048 sc->sc_brtmax = param->ifbrp_csize;
1049 bridge_rttrim(sc);
1050
1051 return (0);
1052 }
1053
1054 static int
1055 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1056 {
1057 struct ifbrparam *param = arg;
1058
1059 param->ifbrp_csize = sc->sc_brtmax;
1060
1061 return (0);
1062 }
1063
1064 static int
1065 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1066 {
1067 struct ifbifconf *bifc = arg;
1068 struct bridge_iflist *bif;
1069 struct ifbreq *breqs;
1070 int i, count, error = 0;
1071
1072 retry:
1073 BRIDGE_LOCK(sc);
1074 count = 0;
1075 LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1076 count++;
1077 BRIDGE_UNLOCK(sc);
1078
1079 if (count == 0) {
1080 bifc->ifbic_len = 0;
1081 return 0;
1082 }
1083
1084 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1085 		/* Tell the caller that a larger buffer is needed */
1086 bifc->ifbic_len = sizeof(*breqs) * count;
1087 return 0;
1088 }
1089
1090 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1091
1092 BRIDGE_LOCK(sc);
1093
1094 i = 0;
1095 LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1096 i++;
1097 if (i > count) {
1098 /*
1099 * The number of members has been increased.
1100 * We need more memory!
1101 */
1102 BRIDGE_UNLOCK(sc);
1103 kmem_free(breqs, sizeof(*breqs) * count);
1104 goto retry;
1105 }
1106
1107 i = 0;
1108 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1109 struct ifbreq *breq = &breqs[i++];
1110 memset(breq, 0, sizeof(*breq));
1111
1112 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1113 sizeof(breq->ifbr_ifsname));
1114 breq->ifbr_ifsflags = bif->bif_flags;
1115 breq->ifbr_state = bif->bif_state;
1116 breq->ifbr_priority = bif->bif_priority;
1117 breq->ifbr_path_cost = bif->bif_path_cost;
1118 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1119 }
1120
1121 	/* Don't call copyout while holding the mutex */
1122 BRIDGE_UNLOCK(sc);
1123
1124 for (i = 0; i < count; i++) {
1125 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1126 if (error)
1127 break;
1128 }
1129 bifc->ifbic_len = sizeof(*breqs) * i;
1130
1131 kmem_free(breqs, sizeof(*breqs) * count);
1132
1133 return error;
1134 }
1135
1136 static int
1137 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1138 {
1139 struct ifbaconf *bac = arg;
1140 struct bridge_rtnode *brt;
1141 struct ifbareq bareq;
1142 int count = 0, error = 0, len;
1143
1144 if (bac->ifbac_len == 0)
1145 return (0);
1146
1147 BRIDGE_RT_INTR_LOCK(sc);
1148
1149 len = bac->ifbac_len;
1150 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1151 if (len < sizeof(bareq))
1152 goto out;
1153 memset(&bareq, 0, sizeof(bareq));
1154 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1155 sizeof(bareq.ifba_ifsname));
1156 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1157 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1158 bareq.ifba_expire = brt->brt_expire - time_uptime;
1159 } else
1160 bareq.ifba_expire = 0;
1161 bareq.ifba_flags = brt->brt_flags;
1162
1163 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1164 if (error)
1165 goto out;
1166 count++;
1167 len -= sizeof(bareq);
1168 }
1169 out:
1170 BRIDGE_RT_INTR_UNLOCK(sc);
1171
1172 bac->ifbac_len = sizeof(bareq) * count;
1173 return (error);
1174 }
1175
1176 static int
1177 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1178 {
1179 struct ifbareq *req = arg;
1180 struct bridge_iflist *bif;
1181 int error;
1182
1183 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1184 if (bif == NULL)
1185 return (ENOENT);
1186
1187 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1188 req->ifba_flags);
1189
1190 bridge_release_member(sc, bif);
1191
1192 return (error);
1193 }
1194
1195 static int
1196 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1197 {
1198 struct ifbrparam *param = arg;
1199
1200 sc->sc_brttimeout = param->ifbrp_ctime;
1201
1202 return (0);
1203 }
1204
1205 static int
1206 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1207 {
1208 struct ifbrparam *param = arg;
1209
1210 param->ifbrp_ctime = sc->sc_brttimeout;
1211
1212 return (0);
1213 }
1214
1215 static int
1216 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1217 {
1218 struct ifbareq *req = arg;
1219
1220 return (bridge_rtdaddr(sc, req->ifba_dst));
1221 }
1222
1223 static int
1224 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1225 {
1226 struct ifbreq *req = arg;
1227
1228 bridge_rtflush(sc, req->ifbr_ifsflags);
1229
1230 return (0);
1231 }
1232
1233 static int
1234 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1235 {
1236 struct ifbrparam *param = arg;
1237
1238 param->ifbrp_prio = sc->sc_bridge_priority;
1239
1240 return (0);
1241 }
1242
1243 static int
1244 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1245 {
1246 struct ifbrparam *param = arg;
1247
1248 sc->sc_bridge_priority = param->ifbrp_prio;
1249
1250 if (sc->sc_if.if_flags & IFF_RUNNING)
1251 bstp_initialization(sc);
1252
1253 return (0);
1254 }
1255
1256 static int
1257 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1258 {
1259 struct ifbrparam *param = arg;
1260
1261 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1262
1263 return (0);
1264 }
1265
1266 static int
1267 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1268 {
1269 struct ifbrparam *param = arg;
1270
1271 if (param->ifbrp_hellotime == 0)
1272 return (EINVAL);
1273 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1274
1275 if (sc->sc_if.if_flags & IFF_RUNNING)
1276 bstp_initialization(sc);
1277
1278 return (0);
1279 }
1280
1281 static int
1282 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1283 {
1284 struct ifbrparam *param = arg;
1285
1286 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1287
1288 return (0);
1289 }
1290
1291 static int
1292 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1293 {
1294 struct ifbrparam *param = arg;
1295
1296 if (param->ifbrp_fwddelay == 0)
1297 return (EINVAL);
1298 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1299
1300 if (sc->sc_if.if_flags & IFF_RUNNING)
1301 bstp_initialization(sc);
1302
1303 return (0);
1304 }
1305
1306 static int
1307 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1308 {
1309 struct ifbrparam *param = arg;
1310
1311 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1312
1313 return (0);
1314 }
1315
1316 static int
1317 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1318 {
1319 struct ifbrparam *param = arg;
1320
1321 if (param->ifbrp_maxage == 0)
1322 return (EINVAL);
1323 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1324
1325 if (sc->sc_if.if_flags & IFF_RUNNING)
1326 bstp_initialization(sc);
1327
1328 return (0);
1329 }
1330
1331 static int
1332 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1333 {
1334 struct ifbreq *req = arg;
1335 struct bridge_iflist *bif;
1336
1337 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1338 if (bif == NULL)
1339 return (ENOENT);
1340
1341 bif->bif_priority = req->ifbr_priority;
1342
1343 if (sc->sc_if.if_flags & IFF_RUNNING)
1344 bstp_initialization(sc);
1345
1346 bridge_release_member(sc, bif);
1347
1348 return (0);
1349 }
1350
1351 #if defined(BRIDGE_IPF)
1352 static int
1353 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1354 {
1355 struct ifbrparam *param = arg;
1356
1357 param->ifbrp_filter = sc->sc_filter_flags;
1358
1359 return (0);
1360 }
1361
1362 static int
1363 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1364 {
1365 struct ifbrparam *param = arg;
1366 uint32_t nflags, oflags;
1367
1368 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1369 return (EINVAL);
1370
1371 nflags = param->ifbrp_filter;
1372 oflags = sc->sc_filter_flags;
1373
1374 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1375 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1376 sc->sc_if.if_pfil);
1377 }
1378 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1379 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1380 sc->sc_if.if_pfil);
1381 }
1382
1383 sc->sc_filter_flags = nflags;
1384
1385 return (0);
1386 }
1387 #endif /* BRIDGE_IPF */
1388
1389 static int
1390 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1391 {
1392 struct ifbreq *req = arg;
1393 struct bridge_iflist *bif;
1394
1395 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1396 if (bif == NULL)
1397 return (ENOENT);
1398
1399 bif->bif_path_cost = req->ifbr_path_cost;
1400
1401 if (sc->sc_if.if_flags & IFF_RUNNING)
1402 bstp_initialization(sc);
1403
1404 bridge_release_member(sc, bif);
1405
1406 return (0);
1407 }
1408
1409 /*
1410 * bridge_ifdetach:
1411 *
1412 * Detach an interface from a bridge. Called when a member
1413 * interface is detaching.
1414 */
1415 void
1416 bridge_ifdetach(struct ifnet *ifp)
1417 {
1418 struct bridge_softc *sc = ifp->if_bridge;
1419 struct ifbreq breq;
1420
1421 /* ioctl_lock should prevent this from happening */
1422 KASSERT(sc != NULL);
1423
1424 memset(&breq, 0, sizeof(breq));
1425 strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1426
1427 (void) bridge_ioctl_del(sc, &breq);
1428 }
1429
1430 /*
1431 * bridge_init:
1432 *
1433 * Initialize a bridge interface.
1434 */
1435 static int
1436 bridge_init(struct ifnet *ifp)
1437 {
1438 struct bridge_softc *sc = ifp->if_softc;
1439
1440 if (ifp->if_flags & IFF_RUNNING)
1441 return (0);
1442
1443 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1444 bridge_timer, sc);
1445
1446 ifp->if_flags |= IFF_RUNNING;
1447 bstp_initialization(sc);
1448 return (0);
1449 }
1450
1451 /*
1452 * bridge_stop:
1453 *
1454 * Stop the bridge interface.
1455 */
1456 static void
1457 bridge_stop(struct ifnet *ifp, int disable)
1458 {
1459 struct bridge_softc *sc = ifp->if_softc;
1460
1461 if ((ifp->if_flags & IFF_RUNNING) == 0)
1462 return;
1463
1464 callout_stop(&sc->sc_brcallout);
1465 bstp_stop(sc);
1466
1467 bridge_rtflush(sc, IFBF_FLUSHDYN);
1468
1469 ifp->if_flags &= ~IFF_RUNNING;
1470 }
1471
1472 /*
1473 * bridge_enqueue:
1474 *
1475 * Enqueue a packet on a bridge member interface.
1476 */
1477 void
1478 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1479 int runfilt)
1480 {
1481 ALTQ_DECL(struct altq_pktattr pktattr;)
1482 int len, error;
1483 short mflags;
1484
1485 /*
1486 * Clear any in-bound checksum flags for this packet.
1487 */
1488 m->m_pkthdr.csum_flags = 0;
1489
1490 if (runfilt) {
1491 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1492 dst_ifp, PFIL_OUT) != 0) {
1493 if (m != NULL)
1494 m_freem(m);
1495 return;
1496 }
1497 if (m == NULL)
1498 return;
1499 }
1500
1501 #ifdef ALTQ
1502 /*
1503 * If ALTQ is enabled on the member interface, do
1504 * classification; the queueing discipline might
1505 * not require classification, but might require
1506 * the address family/header pointer in the pktattr.
1507 */
1508 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1509 /* XXX IFT_ETHER */
1510 altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
1511 }
1512 #endif /* ALTQ */
1513
1514 len = m->m_pkthdr.len;
1515 m->m_flags |= M_PROTO1;
1516 mflags = m->m_flags;
1517
1518 IFQ_ENQUEUE(&dst_ifp->if_snd, m, &pktattr, error);
1519
1520 if (error) {
1521 /* mbuf is already freed */
1522 sc->sc_if.if_oerrors++;
1523 return;
1524 }
1525
1526 sc->sc_if.if_opackets++;
1527 sc->sc_if.if_obytes += len;
1528
1529 dst_ifp->if_obytes += len;
1530
1531 if (mflags & M_MCAST) {
1532 sc->sc_if.if_omcasts++;
1533 dst_ifp->if_omcasts++;
1534 }
1535
1536 if ((dst_ifp->if_flags & IFF_OACTIVE) == 0)
1537 (*dst_ifp->if_start)(dst_ifp);
1538 }
1539
1540 /*
1541 * bridge_output:
1542 *
1543 * Send output from a bridge member interface. This
1544 * performs the bridging function for locally originated
1545 * packets.
1546 *
1547 * The mbuf has the Ethernet header already attached. We must
1548 * enqueue or free the mbuf before returning.
1549 */
1550 int
1551 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1552 struct rtentry *rt)
1553 {
1554 struct ether_header *eh;
1555 struct ifnet *dst_if;
1556 struct bridge_softc *sc;
1557 #ifndef BRIDGE_MPSAFE
1558 int s;
1559 #endif
1560
1561 if (m->m_len < ETHER_HDR_LEN) {
1562 m = m_pullup(m, ETHER_HDR_LEN);
1563 if (m == NULL)
1564 return (0);
1565 }
1566
1567 eh = mtod(m, struct ether_header *);
1568 sc = ifp->if_bridge;
1569
1570 #ifndef BRIDGE_MPSAFE
1571 s = splnet();
1572 #endif
1573
1574 /*
1575 * If bridge is down, but the original output interface is up,
1576 * go ahead and send out that interface. Otherwise, the packet
1577 * is dropped below.
1578 */
1579 if (__predict_false(sc == NULL) ||
1580 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1581 dst_if = ifp;
1582 goto sendunicast;
1583 }
1584
1585 /*
1586 * If the packet is a multicast, or we don't know a better way to
1587 * get there, send to all interfaces.
1588 */
1589 if (ETHER_IS_MULTICAST(eh->ether_dhost))
1590 dst_if = NULL;
1591 else
1592 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1593 if (dst_if == NULL) {
1594 struct bridge_iflist *bif;
1595 struct mbuf *mc;
1596 int used = 0;
1597 int ss;
1598
1599 BRIDGE_PSZ_RENTER(ss);
1600 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1601 bif = bridge_try_hold_bif(bif);
1602 if (bif == NULL)
1603 continue;
1604 BRIDGE_PSZ_REXIT(ss);
1605
1606 dst_if = bif->bif_ifp;
1607 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1608 goto next;
1609
1610 /*
1611 * If this is not the original output interface,
1612 * and the interface is participating in spanning
1613 * tree, make sure the port is in a state that
1614 * allows forwarding.
1615 */
1616 if (dst_if != ifp &&
1617 (bif->bif_flags & IFBIF_STP) != 0) {
1618 switch (bif->bif_state) {
1619 case BSTP_IFSTATE_BLOCKING:
1620 case BSTP_IFSTATE_LISTENING:
1621 case BSTP_IFSTATE_DISABLED:
1622 goto next;
1623 }
1624 }
1625
1626 if (LIST_NEXT(bif, bif_next) == NULL) {
1627 used = 1;
1628 mc = m;
1629 } else {
1630 mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
1631 if (mc == NULL) {
1632 sc->sc_if.if_oerrors++;
1633 goto next;
1634 }
1635 }
1636
1637 bridge_enqueue(sc, dst_if, mc, 0);
1638 next:
1639 bridge_release_member(sc, bif);
1640 BRIDGE_PSZ_RENTER(ss);
1641 }
1642 BRIDGE_PSZ_REXIT(ss);
1643
1644 if (used == 0)
1645 m_freem(m);
1646 #ifndef BRIDGE_MPSAFE
1647 splx(s);
1648 #endif
1649 return (0);
1650 }
1651
1652 sendunicast:
1653 /*
1654 * XXX Spanning tree consideration here?
1655 */
1656
1657 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1658 m_freem(m);
1659 #ifndef BRIDGE_MPSAFE
1660 splx(s);
1661 #endif
1662 return (0);
1663 }
1664
1665 bridge_enqueue(sc, dst_if, m, 0);
1666
1667 #ifndef BRIDGE_MPSAFE
1668 splx(s);
1669 #endif
1670 return (0);
1671 }
1672
1673 /*
1674 * bridge_start:
1675 *
1676 * Start output on a bridge.
1677 *
1678 * NOTE: This routine should never be called in this implementation.
1679 */
1680 static void
1681 bridge_start(struct ifnet *ifp)
1682 {
1683
1684 printf("%s: bridge_start() called\n", ifp->if_xname);
1685 }
1686
1687 /*
1688 * bridge_forward:
1689 *
1690 * The forwarding function of the bridge.
1691 */
1692 static void
1693 bridge_forward(void *v)
1694 {
1695 struct bridge_softc *sc = v;
1696 struct mbuf *m;
1697 struct bridge_iflist *bif;
1698 struct ifnet *src_if, *dst_if;
1699 struct ether_header *eh;
1700 #ifndef BRIDGE_MPSAFE
1701 int s;
1702
1703 KERNEL_LOCK(1, NULL);
1704 mutex_enter(softnet_lock);
1705 #endif
1706
1707 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1708 #ifndef BRIDGE_MPSAFE
1709 mutex_exit(softnet_lock);
1710 KERNEL_UNLOCK_ONE(NULL);
1711 #endif
1712 return;
1713 }
1714
1715 #ifndef BRIDGE_MPSAFE
1716 s = splnet();
1717 #endif
1718 while ((m = pktq_dequeue(sc->sc_fwd_pktq)) != NULL) {
1719 src_if = m->m_pkthdr.rcvif;
1720
1721 sc->sc_if.if_ipackets++;
1722 sc->sc_if.if_ibytes += m->m_pkthdr.len;
1723
1724 /*
1725 * Look up the bridge_iflist.
1726 */
1727 bif = bridge_lookup_member_if(sc, src_if);
1728 if (bif == NULL) {
1729 /* Interface is not a bridge member (anymore?) */
1730 m_freem(m);
1731 continue;
1732 }
1733
1734 if (bif->bif_flags & IFBIF_STP) {
1735 switch (bif->bif_state) {
1736 case BSTP_IFSTATE_BLOCKING:
1737 case BSTP_IFSTATE_LISTENING:
1738 case BSTP_IFSTATE_DISABLED:
1739 m_freem(m);
1740 bridge_release_member(sc, bif);
1741 continue;
1742 }
1743 }
1744
1745 eh = mtod(m, struct ether_header *);
1746
1747 /*
1748 * If the interface is learning, and the source
1749 * address is valid and not multicast, record
1750 * the address.
1751 */
1752 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1753 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1754 (eh->ether_shost[0] == 0 &&
1755 eh->ether_shost[1] == 0 &&
1756 eh->ether_shost[2] == 0 &&
1757 eh->ether_shost[3] == 0 &&
1758 eh->ether_shost[4] == 0 &&
1759 eh->ether_shost[5] == 0) == 0) {
1760 (void) bridge_rtupdate(sc, eh->ether_shost,
1761 src_if, 0, IFBAF_DYNAMIC);
1762 }
1763
1764 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1765 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1766 m_freem(m);
1767 bridge_release_member(sc, bif);
1768 continue;
1769 }
1770
1771 bridge_release_member(sc, bif);
1772
1773 /*
1774 * At this point, the port either doesn't participate
1775 * in spanning tree or it is in the forwarding state.
1776 */
1777
1778 /*
1779 * If the packet is unicast, destined for someone on
1780 * "this" side of the bridge, drop it.
1781 */
1782 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1783 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1784 if (src_if == dst_if) {
1785 m_freem(m);
1786 continue;
1787 }
1788 } else {
1789 /* ...forward it to all interfaces. */
1790 sc->sc_if.if_imcasts++;
1791 dst_if = NULL;
1792 }
1793
1794 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1795 m->m_pkthdr.rcvif, PFIL_IN) != 0) {
1796 if (m != NULL)
1797 m_freem(m);
1798 continue;
1799 }
1800 if (m == NULL)
1801 continue;
1802
1803 if (dst_if == NULL) {
1804 bridge_broadcast(sc, src_if, m);
1805 continue;
1806 }
1807
1808 /*
1809 * At this point, we're dealing with a unicast frame
1810 * going to a different interface.
1811 */
1812 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1813 m_freem(m);
1814 continue;
1815 }
1816
1817 bif = bridge_lookup_member_if(sc, dst_if);
1818 if (bif == NULL) {
1819 /* Not a member of the bridge (anymore?) */
1820 m_freem(m);
1821 continue;
1822 }
1823
1824 if (bif->bif_flags & IFBIF_STP) {
1825 switch (bif->bif_state) {
1826 case BSTP_IFSTATE_DISABLED:
1827 case BSTP_IFSTATE_BLOCKING:
1828 m_freem(m);
1829 bridge_release_member(sc, bif);
1830 continue;
1831 }
1832 }
1833
1834 bridge_release_member(sc, bif);
1835
1836 bridge_enqueue(sc, dst_if, m, 1);
1837 }
1838 #ifndef BRIDGE_MPSAFE
1839 splx(s);
1840 mutex_exit(softnet_lock);
1841 KERNEL_UNLOCK_ONE(NULL);
1842 #endif
1843 }
1844
1845 static bool
1846 bstp_state_before_learning(struct bridge_iflist *bif)
1847 {
1848 if (bif->bif_flags & IFBIF_STP) {
1849 switch (bif->bif_state) {
1850 case BSTP_IFSTATE_BLOCKING:
1851 case BSTP_IFSTATE_LISTENING:
1852 case BSTP_IFSTATE_DISABLED:
1853 return true;
1854 }
1855 }
1856 return false;
1857 }
1858
1859 static bool
1860 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1861 {
1862 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1863
1864 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1865 #if NCARP > 0
1866 || (bif->bif_ifp->if_carp &&
1867 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1868 #endif /* NCARP > 0 */
1869 )
1870 return true;
1871
1872 return false;
1873 }
1874
1875 /*
1876 * bridge_input:
1877 *
1878 * Receive input from a member interface. Queue the packet for
1879 * bridging if it is not for us.
1880 */
1881 static void
1882 bridge_input(struct ifnet *ifp, struct mbuf *m)
1883 {
1884 struct bridge_softc *sc = ifp->if_bridge;
1885 struct bridge_iflist *bif;
1886 struct ether_header *eh;
1887
1888 if (__predict_false(sc == NULL) ||
1889 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1890 ether_input(ifp, m);
1891 return;
1892 }
1893
1894 bif = bridge_lookup_member_if(sc, ifp);
1895 if (bif == NULL) {
1896 ether_input(ifp, m);
1897 return;
1898 }
1899
1900 eh = mtod(m, struct ether_header *);
1901
1902 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1903 if (memcmp(etherbroadcastaddr,
1904 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1905 m->m_flags |= M_BCAST;
1906 else
1907 m->m_flags |= M_MCAST;
1908 }
1909
1910 /*
1911 * A 'fast' path for packets addressed to interfaces that are
1912 * part of this bridge.
1913 */
1914 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1915 !bstp_state_before_learning(bif)) {
1916 struct bridge_iflist *_bif;
1917 struct ifnet *_ifp = NULL;
1918 int s;
1919
1920 BRIDGE_PSZ_RENTER(s);
1921 LIST_FOREACH(_bif, &sc->sc_iflist, bif_next) {
1922 /* It is destined for us. */
1923 if (bridge_ourether(_bif, eh, 0)) {
1924 _bif = bridge_try_hold_bif(_bif);
1925 BRIDGE_PSZ_REXIT(s);
1926 if (_bif == NULL)
1927 goto out;
1928 if (_bif->bif_flags & IFBIF_LEARNING)
1929 (void) bridge_rtupdate(sc,
1930 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1931 _ifp = m->m_pkthdr.rcvif = _bif->bif_ifp;
1932 bridge_release_member(sc, _bif);
1933 goto out;
1934 }
1935
1936 /* We just received a packet that we sent out. */
1937 if (bridge_ourether(_bif, eh, 1))
1938 break;
1939 }
1940 BRIDGE_PSZ_REXIT(s);
1941 out:
1942
1943 if (_bif != NULL) {
1944 bridge_release_member(sc, bif);
1945 if (_ifp != NULL)
1946 ether_input(_ifp, m);
1947 else
1948 m_freem(m);
1949 return;
1950 }
1951 }
1952
1953 /* Tap off 802.1D packets; they do not get forwarded. */
1954 if (bif->bif_flags & IFBIF_STP &&
1955 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
1956 bstp_input(sc, bif, m);
1957 bridge_release_member(sc, bif);
1958 return;
1959 }
1960
1961 /*
1962 * A normal switch would discard the packet here, but that's not what
1963 * we've done historically. This also prevents some obnoxious behaviour.
1964 */
1965 if (bstp_state_before_learning(bif)) {
1966 bridge_release_member(sc, bif);
1967 ether_input(ifp, m);
1968 return;
1969 }
1970
1971 bridge_release_member(sc, bif);
1972
1973 /* Queue the packet for bridge forwarding. */
1974 if (__predict_false(!pktq_enqueue(sc->sc_fwd_pktq, m, 0)))
1975 m_freem(m);
1976 }
1977
1978 /*
1979 * bridge_broadcast:
1980 *
1981 * Send a frame to all interfaces that are members of
1982 * the bridge, except for the one on which the packet
1983 * arrived.
1984 */
1985 static void
1986 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
1987 struct mbuf *m)
1988 {
1989 struct bridge_iflist *bif;
1990 struct mbuf *mc;
1991 struct ifnet *dst_if;
1992 bool used, bmcast;
1993 int s;
1994
1995 used = bmcast = m->m_flags & (M_BCAST|M_MCAST);
1996
1997 BRIDGE_PSZ_RENTER(s);
1998 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1999 bif = bridge_try_hold_bif(bif);
2000 if (bif == NULL)
2001 continue;
2002 BRIDGE_PSZ_REXIT(s);
2003
2004 dst_if = bif->bif_ifp;
2005 if (dst_if == src_if)
2006 goto next;
2007
2008 if (bif->bif_flags & IFBIF_STP) {
2009 switch (bif->bif_state) {
2010 case BSTP_IFSTATE_BLOCKING:
2011 case BSTP_IFSTATE_DISABLED:
2012 goto next;
2013 }
2014 }
2015
2016 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2017 goto next;
2018
2019 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2020 goto next;
2021
2022 if (!used && LIST_NEXT(bif, bif_next) == NULL) {
2023 mc = m;
2024 used = true;
2025 } else {
2026 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
2027 if (mc == NULL) {
2028 sc->sc_if.if_oerrors++;
2029 goto next;
2030 }
2031 }
2032
2033 bridge_enqueue(sc, dst_if, mc, 1);
2034 next:
2035 bridge_release_member(sc, bif);
2036 BRIDGE_PSZ_RENTER(s);
2037 }
2038 BRIDGE_PSZ_REXIT(s);
2039
2040 if (bmcast)
2041 ether_input(src_if, m);
2042 else if (!used)
2043 m_freem(m);
2044 }
2045
2046 static int
2047 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2048 struct bridge_rtnode **brtp)
2049 {
2050 struct bridge_rtnode *brt;
2051 int error;
2052
2053 if (sc->sc_brtcnt >= sc->sc_brtmax)
2054 return ENOSPC;
2055
2056 /*
2057 * Allocate a new bridge forwarding node, and
2058 * initialize the expiration time and Ethernet
2059 * address.
2060 */
2061 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2062 if (brt == NULL)
2063 return ENOMEM;
2064
2065 memset(brt, 0, sizeof(*brt));
2066 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2067 brt->brt_flags = IFBAF_DYNAMIC;
2068 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2069
2070 BRIDGE_RT_INTR_LOCK(sc);
2071 error = bridge_rtnode_insert(sc, brt);
2072 BRIDGE_RT_INTR_UNLOCK(sc);
2073
2074 if (error != 0) {
2075 pool_put(&bridge_rtnode_pool, brt);
2076 return error;
2077 }
2078
2079 *brtp = brt;
2080 return 0;
2081 }
2082
2083 /*
2084 * bridge_rtupdate:
2085 *
2086 * Add a bridge routing entry.
2087 */
2088 static int
2089 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2090 struct ifnet *dst_if, int setflags, uint8_t flags)
2091 {
2092 struct bridge_rtnode *brt;
2093 int s;
2094
2095 again:
2096 /*
2097 * A route for this destination might already exist. If so,
2098 * update it, otherwise create a new one.
2099 */
2100 BRIDGE_RT_RENTER(s);
2101 brt = bridge_rtnode_lookup(sc, dst);
2102
2103 if (brt != NULL) {
2104 brt->brt_ifp = dst_if;
2105 if (setflags) {
2106 brt->brt_flags = flags;
2107 if (flags & IFBAF_STATIC)
2108 brt->brt_expire = 0;
2109 else
2110 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2111 } else {
2112 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2113 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2114 }
2115 }
2116 BRIDGE_RT_REXIT(s);
2117
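	/*
	 * No entry found: allocate and insert a new node, then retry so
	 * that it is updated through the normal path above.
	 */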
2118 if (brt == NULL) {
2119 int r;
2120
2121 r = bridge_rtalloc(sc, dst, &brt);
2122 if (r != 0)
2123 return r;
2124 goto again;
2125 }
2126
2127 return 0;
2128 }
2129
2130 /*
2131 * bridge_rtlookup:
2132 *
2133 * Lookup the destination interface for an address.
2134 */
2135 static struct ifnet *
2136 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2137 {
2138 struct bridge_rtnode *brt;
2139 struct ifnet *ifs = NULL;
2140 int s;
2141
2142 BRIDGE_RT_RENTER(s);
2143 brt = bridge_rtnode_lookup(sc, addr);
2144 if (brt != NULL)
2145 ifs = brt->brt_ifp;
2146 BRIDGE_RT_REXIT(s);
2147
2148 return ifs;
2149 }
2150
2151 typedef bool (*bridge_iterate_cb_t)
2152 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2153
2154 /*
2155 * bridge_rtlist_iterate_remove:
2156 *
2157  *	Iterate over sc->sc_rtlist and remove the rtnodes that the func
2158  *	callback judges should be removed.  Removals are done in a
2159  *	pserialize manner; to this end, all kmem_* operations are kept
2160  *	outside the mutexes.
2161 */
2162 static void
2163 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2164 {
2165 struct bridge_rtnode *brt, *nbrt;
2166 struct bridge_rtnode **brt_list;
2167 int i, count;
2168
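	/*
	 * Snapshot the entry count and allocate the removal array without
	 * holding any locks; if entries have been added in the meantime,
	 * redo the allocation with a larger array.
	 */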
2169 retry:
2170 count = sc->sc_brtcnt;
2171 if (count == 0)
2172 return;
2173 brt_list = kmem_alloc(sizeof(struct bridge_rtnode *) * count, KM_SLEEP);
2174
2175 BRIDGE_RT_LOCK(sc);
2176 BRIDGE_RT_INTR_LOCK(sc);
2177 if (__predict_false(sc->sc_brtcnt > count)) {
2178 		/* The number of rtnodes grew; we need a larger array */
2179 BRIDGE_RT_INTR_UNLOCK(sc);
2180 BRIDGE_RT_UNLOCK(sc);
2181 kmem_free(brt_list, sizeof(*brt_list) * count);
2182 goto retry;
2183 }
2184
2185 i = 0;
2186 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2187 bool need_break = false;
2188 if (func(sc, brt, &need_break, arg)) {
2189 bridge_rtnode_remove(sc, brt);
2190 brt_list[i++] = brt;
2191 }
2192 if (need_break)
2193 break;
2194 }
2195 BRIDGE_RT_INTR_UNLOCK(sc);
2196
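	/* Wait for readers to drain before the removed nodes are freed. */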
2197 if (i > 0)
2198 BRIDGE_RT_PSZ_PERFORM(sc);
2199 BRIDGE_RT_UNLOCK(sc);
2200
2201 while (--i >= 0)
2202 bridge_rtnode_destroy(brt_list[i]);
2203
2204 kmem_free(brt_list, sizeof(*brt_list) * count);
2205 }
2206
2207 static bool
2208 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2209 bool *need_break, void *arg)
2210 {
2211 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2212 		/* Take the subsequent removal into account */
2213 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2214 *need_break = true;
2215 return true;
2216 } else
2217 return false;
2218 }
2219
2220 static void
2221 bridge_rttrim0(struct bridge_softc *sc)
2222 {
2223 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2224 }
2225
2226 /*
2227 * bridge_rttrim:
2228 *
2229  *	Trim the routing table so that the number of routing
2230  *	entries is less than or equal to the maximum.
2232 */
2233 static void
2234 bridge_rttrim(struct bridge_softc *sc)
2235 {
2236
2237 /* Make sure we actually need to do this. */
2238 if (sc->sc_brtcnt <= sc->sc_brtmax)
2239 return;
2240
2241 /* Force an aging cycle; this might trim enough addresses. */
2242 bridge_rtage(sc);
2243 if (sc->sc_brtcnt <= sc->sc_brtmax)
2244 return;
2245
2246 bridge_rttrim0(sc);
2247
2248 return;
2249 }
2250
2251 /*
2252 * bridge_timer:
2253 *
2254 * Aging timer for the bridge.
2255 */
2256 static void
2257 bridge_timer(void *arg)
2258 {
2259 struct bridge_softc *sc = arg;
2260
2261 workqueue_enqueue(sc->sc_rtage_wq, &bridge_rtage_wk, NULL);
2262 }
2263
2264 static void
2265 bridge_rtage_work(struct work *wk, void *arg)
2266 {
2267 struct bridge_softc *sc = arg;
2268
2269 KASSERT(wk == &bridge_rtage_wk);
2270
2271 bridge_rtage(sc);
2272
2273 if (sc->sc_if.if_flags & IFF_RUNNING)
2274 callout_reset(&sc->sc_brcallout,
2275 bridge_rtable_prune_period * hz, bridge_timer, sc);
2276 }
2277
2278 static bool
2279 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2280 bool *need_break, void *arg)
2281 {
2282 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2283 time_uptime >= brt->brt_expire)
2284 return true;
2285 else
2286 return false;
2287 }
2288
2289 /*
2290 * bridge_rtage:
2291 *
2292 * Perform an aging cycle.
2293 */
2294 static void
2295 bridge_rtage(struct bridge_softc *sc)
2296 {
2297 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2298 }
2299
2300
2301 static bool
2302 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2303 bool *need_break, void *arg)
2304 {
2305 int full = *(int*)arg;
2306
2307 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2308 return true;
2309 else
2310 return false;
2311 }
2312
2313 /*
2314 * bridge_rtflush:
2315 *
2316 * Remove all dynamic addresses from the bridge.
2317 */
2318 static void
2319 bridge_rtflush(struct bridge_softc *sc, int full)
2320 {
2321 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2322 }
2323
2324 /*
2325 * bridge_rtdaddr:
2326 *
2327 * Remove an address from the table.
2328 */
2329 static int
2330 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2331 {
2332 struct bridge_rtnode *brt;
2333
2334 BRIDGE_RT_LOCK(sc);
2335 BRIDGE_RT_INTR_LOCK(sc);
2336 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2337 BRIDGE_RT_INTR_UNLOCK(sc);
2338 BRIDGE_RT_UNLOCK(sc);
2339 return ENOENT;
2340 }
2341 bridge_rtnode_remove(sc, brt);
2342 BRIDGE_RT_INTR_UNLOCK(sc);
2343 BRIDGE_RT_PSZ_PERFORM(sc);
2344 BRIDGE_RT_UNLOCK(sc);
2345
2346 bridge_rtnode_destroy(brt);
2347
2348 return 0;
2349 }
2350
2351 /*
2352 * bridge_rtdelete:
2353 *
2354  *	Delete routes to a specific member interface.
2355 */
2356 static void
2357 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2358 {
2359 struct bridge_rtnode *brt, *nbrt;
2360
2361 BRIDGE_RT_LOCK(sc);
2362 BRIDGE_RT_INTR_LOCK(sc);
2363 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2364 if (brt->brt_ifp == ifp)
2365 break;
2366 }
2367 if (brt == NULL) {
2368 BRIDGE_RT_INTR_UNLOCK(sc);
2369 BRIDGE_RT_UNLOCK(sc);
2370 return;
2371 }
2372 bridge_rtnode_remove(sc, brt);
2373 BRIDGE_RT_INTR_UNLOCK(sc);
2374 BRIDGE_RT_PSZ_PERFORM(sc);
2375 BRIDGE_RT_UNLOCK(sc);
2376
2377 bridge_rtnode_destroy(brt);
2378 }
2379
2380 /*
2381 * bridge_rtable_init:
2382 *
2383 * Initialize the route table for this bridge.
2384 */
2385 static void
2386 bridge_rtable_init(struct bridge_softc *sc)
2387 {
2388 int i;
2389
2390 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2391 KM_SLEEP);
2392
2393 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2394 LIST_INIT(&sc->sc_rthash[i]);
2395
2396 sc->sc_rthash_key = cprng_fast32();
2397
2398 LIST_INIT(&sc->sc_rtlist);
2399
2400 sc->sc_rtlist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2401 #ifdef BRIDGE_MPSAFE
2402 sc->sc_rtlist_psz = pserialize_create();
2403 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2404 #else
2405 sc->sc_rtlist_psz = NULL;
2406 sc->sc_rtlist_lock = NULL;
2407 #endif
2408 }
2409
2410 /*
2411 * bridge_rtable_fini:
2412 *
2413 * Deconstruct the route table for this bridge.
2414 */
2415 static void
2416 bridge_rtable_fini(struct bridge_softc *sc)
2417 {
2418
2419 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2420 if (sc->sc_rtlist_intr_lock)
2421 mutex_obj_free(sc->sc_rtlist_intr_lock);
2422 if (sc->sc_rtlist_lock)
2423 mutex_obj_free(sc->sc_rtlist_lock);
2424 if (sc->sc_rtlist_psz)
2425 pserialize_destroy(sc->sc_rtlist_psz);
2426 }
2427
2428 /*
2429 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2430 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2431 */
2432 #define mix(a, b, c) \
2433 do { \
2434 a -= b; a -= c; a ^= (c >> 13); \
2435 b -= c; b -= a; b ^= (a << 8); \
2436 c -= a; c -= b; c ^= (b >> 13); \
2437 a -= b; a -= c; a ^= (c >> 12); \
2438 b -= c; b -= a; b ^= (a << 16); \
2439 c -= a; c -= b; c ^= (b >> 5); \
2440 a -= b; a -= c; a ^= (c >> 3); \
2441 b -= c; b -= a; b ^= (a << 10); \
2442 c -= a; c -= b; c ^= (b >> 15); \
2443 } while (/*CONSTCOND*/0)
2444
2445 static inline uint32_t
2446 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2447 {
2448 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2449
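	/*
	 * Mix the 6-byte Ethernet address with the per-bridge random key
	 * and fold the result down to a hash table index.
	 */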
2450 b += addr[5] << 8;
2451 b += addr[4];
2452 a += addr[3] << 24;
2453 a += addr[2] << 16;
2454 a += addr[1] << 8;
2455 a += addr[0];
2456
2457 mix(a, b, c);
2458
2459 return (c & BRIDGE_RTHASH_MASK);
2460 }
2461
2462 #undef mix
2463
2464 /*
2465 * bridge_rtnode_lookup:
2466 *
2467 * Look up a bridge route node for the specified destination.
2468 */
2469 static struct bridge_rtnode *
2470 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2471 {
2472 struct bridge_rtnode *brt;
2473 uint32_t hash;
2474 int dir;
2475
2476 hash = bridge_rthash(sc, addr);
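	/*
	 * The hash chains are kept sorted by bridge_rtnode_insert(), so
	 * the search can stop as soon as it walks past the slot where
	 * the address would be.
	 */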
2477 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2478 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2479 if (dir == 0)
2480 return (brt);
2481 if (dir > 0)
2482 return (NULL);
2483 }
2484
2485 return (NULL);
2486 }
2487
2488 /*
2489 * bridge_rtnode_insert:
2490 *
2491 * Insert the specified bridge node into the route table. We
2492 * assume the entry is not already in the table.
2493 */
2494 static int
2495 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2496 {
2497 struct bridge_rtnode *lbrt;
2498 uint32_t hash;
2499 int dir;
2500
2501 KASSERT(BRIDGE_RT_INTR_LOCKED(sc));
2502
2503 hash = bridge_rthash(sc, brt->brt_addr);
2504
2505 lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2506 if (lbrt == NULL) {
2507 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2508 goto out;
2509 }
2510
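	/* Walk the sorted chain and insert at the appropriate position. */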
2511 do {
2512 dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2513 if (dir == 0)
2514 return (EEXIST);
2515 if (dir > 0) {
2516 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2517 goto out;
2518 }
2519 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2520 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2521 goto out;
2522 }
2523 lbrt = LIST_NEXT(lbrt, brt_hash);
2524 } while (lbrt != NULL);
2525
2526 #ifdef DIAGNOSTIC
2527 panic("bridge_rtnode_insert: impossible");
2528 #endif
2529
2530 out:
2531 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2532 sc->sc_brtcnt++;
2533
2534 return (0);
2535 }
2536
2537 /*
2538 * bridge_rtnode_remove:
2539 *
2540 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2541 */
2542 static void
2543 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2544 {
2545
2546 KASSERT(BRIDGE_RT_INTR_LOCKED(sc));
2547
2548 LIST_REMOVE(brt, brt_hash);
2549 LIST_REMOVE(brt, brt_list);
2550 sc->sc_brtcnt--;
2551 }
2552
2553 /*
2554 * bridge_rtnode_destroy:
2555 *
2556 * Destroy a bridge rtnode.
2557 */
2558 static void
2559 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2560 {
2561
2562 pool_put(&bridge_rtnode_pool, brt);
2563 }
2564
2565 #if defined(BRIDGE_IPF)
2566 extern pfil_head_t *inet_pfil_hook; /* XXX */
2567 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2568
2569 /*
2570 * Send bridge packets through IPF if they are one of the types IPF can deal
2571 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without
2572 * question.)
2573 */
2574 static int
2575 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2576 {
2577 int snap, error;
2578 struct ether_header *eh1, eh2;
2579 struct llc llc1;
2580 uint16_t ether_type;
2581
2582 snap = 0;
2583 	error = -1;	/* Default error unless explicitly cleared to 0 below */
2584 eh1 = mtod(*mp, struct ether_header *);
2585 ether_type = ntohs(eh1->ether_type);
2586
2587 /*
2588 * Check for SNAP/LLC.
2589 */
2590 if (ether_type < ETHERMTU) {
2591 struct llc *llc2 = (struct llc *)(eh1 + 1);
2592
2593 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2594 llc2->llc_dsap == LLC_SNAP_LSAP &&
2595 llc2->llc_ssap == LLC_SNAP_LSAP &&
2596 llc2->llc_control == LLC_UI) {
2597 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2598 snap = 1;
2599 }
2600 }
2601
2602 /*
2603 * If we're trying to filter bridge traffic, don't look at anything
2604 * other than IP and ARP traffic. If the filter doesn't understand
2605 * IPv6, don't allow IPv6 through the bridge either. This is lame
2606 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2607 * but of course we don't have an AppleTalk filter to begin with.
2608 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2609 * ARP traffic.)
2610 */
2611 switch (ether_type) {
2612 case ETHERTYPE_ARP:
2613 case ETHERTYPE_REVARP:
2614 return 0; /* Automatically pass */
2615 case ETHERTYPE_IP:
2616 # ifdef INET6
2617 case ETHERTYPE_IPV6:
2618 # endif /* INET6 */
2619 break;
2620 default:
2621 goto bad;
2622 }
2623
2624 /* Strip off the Ethernet header and keep a copy. */
2625 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2626 m_adj(*mp, ETHER_HDR_LEN);
2627
2628 /* Strip off snap header, if present */
2629 if (snap) {
2630 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2631 m_adj(*mp, sizeof(struct llc));
2632 }
2633
2634 /*
2635 * Check basic packet sanity and run IPF through pfil.
2636 */
2637 KASSERT(!cpu_intr_p());
2638 switch (ether_type)
2639 {
2640 case ETHERTYPE_IP :
2641 error = (dir == PFIL_IN) ? bridge_ip_checkbasic(mp) : 0;
2642 if (error == 0)
2643 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2644 break;
2645 # ifdef INET6
2646 case ETHERTYPE_IPV6 :
2647 error = (dir == PFIL_IN) ? bridge_ip6_checkbasic(mp) : 0;
2648 if (error == 0)
2649 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2650 break;
2651 # endif
2652 default :
2653 error = 0;
2654 break;
2655 }
2656
2657 if (*mp == NULL)
2658 return error;
2659 if (error != 0)
2660 goto bad;
2661
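	/* Default error for the M_PREPEND failure paths below. */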
2662 error = -1;
2663
2664 /*
2665 * Finally, put everything back the way it was and return
2666 */
2667 if (snap) {
2668 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2669 if (*mp == NULL)
2670 return error;
2671 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2672 }
2673
2674 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2675 if (*mp == NULL)
2676 return error;
2677 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2678
2679 return 0;
2680
2681 bad:
2682 m_freem(*mp);
2683 *mp = NULL;
2684 return error;
2685 }
2686
2687 /*
2688  * Perform basic checks on the header size, since
2689  * IPF assumes ip_input has already done them on
2690  * its behalf.  Cut-and-pasted from ip_input.c.
2691 * Given how simple the IPv6 version is,
2692 * does the IPv4 version really need to be
2693 * this complicated?
2694 *
2695 * XXX Should we update ipstat here, or not?
2696 * XXX Right now we update ipstat but not
2697 * XXX csum_counter.
2698 */
2699 static int
2700 bridge_ip_checkbasic(struct mbuf **mp)
2701 {
2702 struct mbuf *m = *mp;
2703 struct ip *ip;
2704 int len, hlen;
2705
2706 if (*mp == NULL)
2707 return -1;
2708
2709 if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2710 if ((m = m_copyup(m, sizeof(struct ip),
2711 (max_linkhdr + 3) & ~3)) == NULL) {
2712 /* XXXJRT new stat, please */
2713 ip_statinc(IP_STAT_TOOSMALL);
2714 goto bad;
2715 }
2716 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
2717 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2718 ip_statinc(IP_STAT_TOOSMALL);
2719 goto bad;
2720 }
2721 }
2722 ip = mtod(m, struct ip *);
2723 if (ip == NULL) goto bad;
2724
2725 if (ip->ip_v != IPVERSION) {
2726 ip_statinc(IP_STAT_BADVERS);
2727 goto bad;
2728 }
2729 hlen = ip->ip_hl << 2;
2730 if (hlen < sizeof(struct ip)) { /* minimum header length */
2731 ip_statinc(IP_STAT_BADHLEN);
2732 goto bad;
2733 }
2734 if (hlen > m->m_len) {
2735 		if ((m = m_pullup(m, hlen)) == NULL) {
2736 ip_statinc(IP_STAT_BADHLEN);
2737 goto bad;
2738 }
2739 ip = mtod(m, struct ip *);
2740 if (ip == NULL) goto bad;
2741 }
2742
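	/*
	 * Use the hardware-verified IPv4 checksum result where the receiving
	 * interface provides one; otherwise verify the checksum in software.
	 */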
2743 switch (m->m_pkthdr.csum_flags &
2744 ((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
2745 M_CSUM_IPv4_BAD)) {
2746 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2747 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2748 goto bad;
2749
2750 case M_CSUM_IPv4:
2751 /* Checksum was okay. */
2752 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2753 break;
2754
2755 default:
2756 /* Must compute it ourselves. */
2757 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2758 if (in_cksum(m, hlen) != 0)
2759 goto bad;
2760 break;
2761 }
2762
2763 /* Retrieve the packet length. */
2764 len = ntohs(ip->ip_len);
2765
2766 /*
2767 * Check for additional length bogosity
2768 */
2769 if (len < hlen) {
2770 ip_statinc(IP_STAT_BADLEN);
2771 goto bad;
2772 }
2773
2774 /*
2775  * Check that the amount of data in the buffers is at least
2776  * as much as the IP header would have us expect.  Drop the
2777  * packet if it is shorter than that.
2778 */
2779 if (m->m_pkthdr.len < len) {
2780 ip_statinc(IP_STAT_TOOSHORT);
2781 goto bad;
2782 }
2783
2784 /* Checks out, proceed */
2785 *mp = m;
2786 return 0;
2787
2788 bad:
2789 *mp = m;
2790 return -1;
2791 }
2792
2793 # ifdef INET6
2794 /*
2795 * Same as above, but for IPv6.
2796 * Cut-and-pasted from ip6_input.c.
2797 * XXX Should we update ip6stat, or not?
2798 */
2799 static int
2800 bridge_ip6_checkbasic(struct mbuf **mp)
2801 {
2802 struct mbuf *m = *mp;
2803 struct ip6_hdr *ip6;
2804
2805 /*
2806 * If the IPv6 header is not aligned, slurp it up into a new
2807 * mbuf with space for link headers, in the event we forward
2808 * it. Otherwise, if it is aligned, make sure the entire base
2809 * IPv6 header is in the first mbuf of the chain.
2810 */
2811 if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2812 struct ifnet *inifp = m->m_pkthdr.rcvif;
2813 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2814 (max_linkhdr + 3) & ~3)) == NULL) {
2815 /* XXXJRT new stat, please */
2816 ip6_statinc(IP6_STAT_TOOSMALL);
2817 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2818 goto bad;
2819 }
2820 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2821 struct ifnet *inifp = m->m_pkthdr.rcvif;
2822 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2823 ip6_statinc(IP6_STAT_TOOSMALL);
2824 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2825 goto bad;
2826 }
2827 }
2828
2829 ip6 = mtod(m, struct ip6_hdr *);
2830
2831 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2832 ip6_statinc(IP6_STAT_BADVERS);
2833 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
2834 goto bad;
2835 }
2836
2837 /* Checks out, proceed */
2838 *mp = m;
2839 return 0;
2840
2841 bad:
2842 *mp = m;
2843 return -1;
2844 }
2845 # endif /* INET6 */
2846 #endif /* BRIDGE_IPF */
2847