1 /* $NetBSD: if_bridge.c,v 1.91.2.3 2015/09/22 12:06:10 skrll Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1999, 2000 Jason L. Wright (jason (at) thought.net)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Jason L. Wright
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 *
68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69 */
70
71 /*
72 * Network interface bridge support.
73 *
74 * TODO:
75 *
76 * - Currently only supports Ethernet-like interfaces (Ethernet,
77 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
78 * to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80 */
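/*
 * Typical userland setup (illustrative only; see bridge(4) and brconfig(8)
 * for authoritative usage; "ex0"/"ex1" are just example member interfaces):
 *
 *	ifconfig bridge0 create
 *	brconfig bridge0 add ex0 add ex1 up
 *
 * The create step ends up in bridge_clone_create() below; the add and up
 * steps arrive via bridge_ioctl() as SIOCSDRVSPEC and SIOCSIFFLAGS.
 */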
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.91.2.3 2015/09/22 12:06:10 skrll Exp $");
84
85 #ifdef _KERNEL_OPT
86 #include "opt_bridge_ipf.h"
87 #include "opt_inet.h"
88 #endif /* _KERNEL_OPT */
89
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105
106 #include <net/bpf.h>
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_llc.h>
111 #include <net/pktqueue.h>
112
113 #include <net/if_ether.h>
114 #include <net/if_bridgevar.h>
115
116 #if defined(BRIDGE_IPF)
117 /* Used for bridge_ip[6]_checkbasic */
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #include <netinet/ip_private.h> /* XXX */
123
124 #include <netinet/ip6.h>
125 #include <netinet6/in6_var.h>
126 #include <netinet6/ip6_var.h>
127 #include <netinet6/ip6_private.h> /* XXX */
128 #endif /* BRIDGE_IPF */
129
130 /*
131 * Size of the route hash table. Must be a power of two.
132 */
133 #ifndef BRIDGE_RTHASH_SIZE
134 #define BRIDGE_RTHASH_SIZE 1024
135 #endif
136
137 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
138
139 #include "carp.h"
140 #if NCARP > 0
141 #include <netinet/in.h>
142 #include <netinet/in_var.h>
143 #include <netinet/ip_carp.h>
144 #endif
145
146 #include "ioconf.h"
147
148 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
149 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
150 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
151
152 /*
153 * Maximum number of addresses to cache.
154 */
155 #ifndef BRIDGE_RTABLE_MAX
156 #define BRIDGE_RTABLE_MAX 100
157 #endif
158
159 /*
160 * Spanning tree defaults.
161 */
162 #define BSTP_DEFAULT_MAX_AGE (20 * 256)
163 #define BSTP_DEFAULT_HELLO_TIME (2 * 256)
164 #define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
165 #define BSTP_DEFAULT_HOLD_TIME (1 * 256)
166 #define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
167 #define BSTP_DEFAULT_PORT_PRIORITY 0x80
168 #define BSTP_DEFAULT_PATH_COST 55
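/*
 * Note: the spanning tree times above are kept in 802.1D wire format,
 * i.e. in units of 1/256 second, which is why the defaults multiply
 * seconds by 256 and the hello-time/forward-delay/max-age ioctl
 * handlers shift by 8 when converting to and from whole seconds.
 */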
169
170 /*
171 * Timeout (in seconds) for entries learned dynamically.
172 */
173 #ifndef BRIDGE_RTABLE_TIMEOUT
174 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
175 #endif
176
177 /*
178 * Number of seconds between walks of the route list.
179 */
180 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
181 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
182 #endif
183
184 #define BRIDGE_RT_INTR_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_intr_lock)
185 #define BRIDGE_RT_INTR_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_intr_lock)
186 #define BRIDGE_RT_INTR_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_intr_lock)
187
188 #define BRIDGE_RT_LOCK(_sc) if ((_sc)->sc_rtlist_lock) \
189 mutex_enter((_sc)->sc_rtlist_lock)
190 #define BRIDGE_RT_UNLOCK(_sc) if ((_sc)->sc_rtlist_lock) \
191 mutex_exit((_sc)->sc_rtlist_lock)
192 #define BRIDGE_RT_LOCKED(_sc) (!(_sc)->sc_rtlist_lock || \
193 mutex_owned((_sc)->sc_rtlist_lock))
194
195 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
196 if ((_sc)->sc_rtlist_psz != NULL) \
197 pserialize_perform((_sc)->sc_rtlist_psz);
198
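/*
 * Read-side protection for the route list: in the BRIDGE_MPSAFE case
 * readers use a pserialize read section in thread context and splhigh
 * in interrupt context, while writers unlink entries under the rtlist
 * locks and call pserialize_perform() before freeing them.
 */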
199 #ifdef BRIDGE_MPSAFE
200 #define BRIDGE_RT_RENTER(__s) do { \
201 if (!cpu_intr_p()) \
202 __s = pserialize_read_enter(); \
203 else \
204 __s = splhigh(); \
205 } while (0)
206 #define BRIDGE_RT_REXIT(__s) do { \
207 if (!cpu_intr_p()) \
208 pserialize_read_exit(__s); \
209 else \
210 splx(__s); \
211 } while (0)
212 #else /* BRIDGE_MPSAFE */
213 #define BRIDGE_RT_RENTER(__s) do { __s = 0; } while (0)
214 #define BRIDGE_RT_REXIT(__s) do { (void)__s; } while (0)
215 #endif /* BRIDGE_MPSAFE */
216
217 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
218
219 static struct pool bridge_rtnode_pool;
220 static struct work bridge_rtage_wk;
221
222 static int bridge_clone_create(struct if_clone *, int);
223 static int bridge_clone_destroy(struct ifnet *);
224
225 static int bridge_ioctl(struct ifnet *, u_long, void *);
226 static int bridge_init(struct ifnet *);
227 static void bridge_stop(struct ifnet *, int);
228 static void bridge_start(struct ifnet *);
229
230 static void bridge_input(struct ifnet *, struct mbuf *);
231 static void bridge_forward(void *);
232
233 static void bridge_timer(void *);
234
235 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
236 struct mbuf *);
237
238 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
239 struct ifnet *, int, uint8_t);
240 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
241 static void bridge_rttrim(struct bridge_softc *);
242 static void bridge_rtage(struct bridge_softc *);
243 static void bridge_rtage_work(struct work *, void *);
244 static void bridge_rtflush(struct bridge_softc *, int);
245 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
246 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
247
248 static void bridge_rtable_init(struct bridge_softc *);
249 static void bridge_rtable_fini(struct bridge_softc *);
250
251 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
252 const uint8_t *);
253 static int bridge_rtnode_insert(struct bridge_softc *,
254 struct bridge_rtnode *);
255 static void bridge_rtnode_remove(struct bridge_softc *,
256 struct bridge_rtnode *);
257 static void bridge_rtnode_destroy(struct bridge_rtnode *);
258
259 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
260 const char *name);
261 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
262 struct ifnet *ifp);
263 static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *);
264 static void bridge_delete_member(struct bridge_softc *,
265 struct bridge_iflist *);
266 static struct bridge_iflist *bridge_try_hold_bif(struct bridge_iflist *);
267
268 static int bridge_ioctl_add(struct bridge_softc *, void *);
269 static int bridge_ioctl_del(struct bridge_softc *, void *);
270 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
271 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
272 static int bridge_ioctl_scache(struct bridge_softc *, void *);
273 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
274 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
275 static int bridge_ioctl_rts(struct bridge_softc *, void *);
276 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
277 static int bridge_ioctl_sto(struct bridge_softc *, void *);
278 static int bridge_ioctl_gto(struct bridge_softc *, void *);
279 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
280 static int bridge_ioctl_flush(struct bridge_softc *, void *);
281 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
282 static int bridge_ioctl_spri(struct bridge_softc *, void *);
283 static int bridge_ioctl_ght(struct bridge_softc *, void *);
284 static int bridge_ioctl_sht(struct bridge_softc *, void *);
285 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
286 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
287 static int bridge_ioctl_gma(struct bridge_softc *, void *);
288 static int bridge_ioctl_sma(struct bridge_softc *, void *);
289 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
290 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
291 #if defined(BRIDGE_IPF)
292 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
293 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
294 static int bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
295 static int bridge_ip_checkbasic(struct mbuf **mp);
296 # ifdef INET6
297 static int bridge_ip6_checkbasic(struct mbuf **mp);
298 # endif /* INET6 */
299 #endif /* BRIDGE_IPF */
300
301 static void bridge_sysctl_fwdq_setup(struct sysctllog **clog,
302 struct bridge_softc *sc);
303
304 struct bridge_control {
305 int (*bc_func)(struct bridge_softc *, void *);
306 int bc_argsize;
307 int bc_flags;
308 };
309
310 #define BC_F_COPYIN 0x01 /* copy arguments in */
311 #define BC_F_COPYOUT 0x02 /* copy arguments out */
312 #define BC_F_SUSER 0x04 /* do super-user check */
313 #define BC_F_XLATEIN 0x08 /* xlate arguments in */
314 #define BC_F_XLATEOUT 0x10 /* xlate arguments out */
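/*
 * BC_F_COPYIN/BC_F_COPYOUT copy a fixed-size argument structure between
 * user space and the args union in bridge_ioctl().  BC_F_XLATEIN/
 * BC_F_XLATEOUT instead pass the user buffer pointer and length through
 * in an ifbifconf/ifbaconf for the variable-length member and address
 * lists (BRDGGIFS/BRDGRTS), whose handlers do their own copyout.
 */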
315
316 static const struct bridge_control bridge_control_table[] = {
317 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
318 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
319
320 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
321 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
322
323 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
324 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
325
326 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
327 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
328
329 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
330
331 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
332 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
333
334 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
335
336 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
337
338 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
339 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
340
341 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
342 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
343
344 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
345 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
346
347 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
348 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
349
350 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
351
352 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
353 #if defined(BRIDGE_IPF)
354 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
355 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
356 #endif /* BRIDGE_IPF */
357 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
358 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
359 };
360
361 static const int bridge_control_table_size = __arraycount(bridge_control_table);
362
363 static LIST_HEAD(, bridge_softc) bridge_list;
364 static kmutex_t bridge_list_lock;
365
366 static struct if_clone bridge_cloner =
367 IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
368
369 /*
370 * bridgeattach:
371 *
372 * Pseudo-device attach routine.
373 */
374 void
375 bridgeattach(int n)
376 {
377
378 pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
379 0, 0, 0, "brtpl", NULL, IPL_NET);
380
381 LIST_INIT(&bridge_list);
382 mutex_init(&bridge_list_lock, MUTEX_DEFAULT, IPL_NET);
383 if_clone_attach(&bridge_cloner);
384 }
385
386 /*
387 * bridge_clone_create:
388 *
389 * Create a new bridge instance.
390 */
391 static int
392 bridge_clone_create(struct if_clone *ifc, int unit)
393 {
394 struct bridge_softc *sc;
395 struct ifnet *ifp;
396 int error, flags;
397
398 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
399 ifp = &sc->sc_if;
400
401 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
402 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
403 sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
404 sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
405 sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
406 sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
407 sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
408 sc->sc_filter_flags = 0;
409
410 /* Initialize our routing table. */
411 bridge_rtable_init(sc);
412
413 #ifdef BRIDGE_MPSAFE
414 flags = WQ_MPSAFE;
415 #else
416 flags = 0;
417 #endif
418 error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
419 bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, flags);
420 if (error)
421 panic("%s: workqueue_create %d\n", __func__, error);
422
423 callout_init(&sc->sc_brcallout, 0);
424 callout_init(&sc->sc_bstpcallout, 0);
425
426 LIST_INIT(&sc->sc_iflist);
427 #ifdef BRIDGE_MPSAFE
428 sc->sc_iflist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
429 sc->sc_iflist_psz = pserialize_create();
430 sc->sc_iflist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
431 #else
432 sc->sc_iflist_intr_lock = NULL;
433 sc->sc_iflist_psz = NULL;
434 sc->sc_iflist_lock = NULL;
435 #endif
436 cv_init(&sc->sc_iflist_cv, "if_bridge_cv");
437
438 if_initname(ifp, ifc->ifc_name, unit);
439 ifp->if_softc = sc;
440 ifp->if_mtu = ETHERMTU;
441 ifp->if_ioctl = bridge_ioctl;
442 ifp->if_output = bridge_output;
443 ifp->if_start = bridge_start;
444 ifp->if_stop = bridge_stop;
445 ifp->if_init = bridge_init;
446 ifp->if_type = IFT_BRIDGE;
447 ifp->if_addrlen = 0;
448 ifp->if_dlt = DLT_EN10MB;
449 ifp->if_hdrlen = ETHER_HDR_LEN;
450
451 sc->sc_fwd_pktq = pktq_create(IFQ_MAXLEN, bridge_forward, sc);
452 KASSERT(sc->sc_fwd_pktq != NULL);
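	/*
	 * Frames to be bridged are queued on sc_fwd_pktq by
	 * bridge_input() and drained by bridge_forward(), which
	 * pktq_create() dispatches as a softint handler; this
	 * decouples hardware receive from the forwarding path.
	 */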
453
454 bridge_sysctl_fwdq_setup(&ifp->if_sysctl_log, sc);
455
456 if_attach(ifp);
457
458 if_alloc_sadl(ifp);
459
460 mutex_enter(&bridge_list_lock);
461 LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
462 mutex_exit(&bridge_list_lock);
463
464 return (0);
465 }
466
467 /*
468 * bridge_clone_destroy:
469 *
470 * Destroy a bridge instance.
471 */
472 static int
473 bridge_clone_destroy(struct ifnet *ifp)
474 {
475 struct bridge_softc *sc = ifp->if_softc;
476 struct bridge_iflist *bif;
477 int s;
478
479 	/* Must be called while IFF_RUNNING is still set, i.e., before bridge_stop */
480 pktq_barrier(sc->sc_fwd_pktq);
481
482 s = splnet();
483
484 bridge_stop(ifp, 1);
485
486 BRIDGE_LOCK(sc);
487 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
488 bridge_delete_member(sc, bif);
489 BRIDGE_UNLOCK(sc);
490
491 mutex_enter(&bridge_list_lock);
492 LIST_REMOVE(sc, sc_list);
493 mutex_exit(&bridge_list_lock);
494
495 splx(s);
496
497 if_detach(ifp);
498
499 	/* Should be called after if_detach to be safe */
500 pktq_flush(sc->sc_fwd_pktq);
501 pktq_destroy(sc->sc_fwd_pktq);
502
503 /* Tear down the routing table. */
504 bridge_rtable_fini(sc);
505
506 cv_destroy(&sc->sc_iflist_cv);
507 if (sc->sc_iflist_intr_lock)
508 mutex_obj_free(sc->sc_iflist_intr_lock);
509
510 if (sc->sc_iflist_psz)
511 pserialize_destroy(sc->sc_iflist_psz);
512 if (sc->sc_iflist_lock)
513 mutex_obj_free(sc->sc_iflist_lock);
514
515 workqueue_destroy(sc->sc_rtage_wq);
516
517 kmem_free(sc, sizeof(*sc));
518
519 return (0);
520 }
521
522 static int
523 bridge_sysctl_fwdq_maxlen(SYSCTLFN_ARGS)
524 {
525 struct sysctlnode node = *rnode;
526 const struct bridge_softc *sc = node.sysctl_data;
527 return sysctl_pktq_maxlen(SYSCTLFN_CALL(rnode), sc->sc_fwd_pktq);
528 }
529
530 #define SYSCTL_BRIDGE_PKTQ(cn, c) \
531 static int \
532 bridge_sysctl_fwdq_##cn(SYSCTLFN_ARGS) \
533 { \
534 struct sysctlnode node = *rnode; \
535 const struct bridge_softc *sc = node.sysctl_data; \
536 return sysctl_pktq_count(SYSCTLFN_CALL(rnode), \
537 sc->sc_fwd_pktq, c); \
538 }
539
540 SYSCTL_BRIDGE_PKTQ(items, PKTQ_NITEMS)
541 SYSCTL_BRIDGE_PKTQ(drops, PKTQ_DROPS)
542
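/*
 * Attach per-bridge forwarding queue statistics under sysctl, e.g.
 * net.interfaces.bridge0.fwdq.{len,maxlen,drops} (the middle node is
 * named after the interface).
 */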
543 static void
544 bridge_sysctl_fwdq_setup(struct sysctllog **clog, struct bridge_softc *sc)
545 {
546 const struct sysctlnode *cnode, *rnode;
547 sysctlfn len_func = NULL, maxlen_func = NULL, drops_func = NULL;
548 const char *ifname = sc->sc_if.if_xname;
549
550 len_func = bridge_sysctl_fwdq_items;
551 maxlen_func = bridge_sysctl_fwdq_maxlen;
552 drops_func = bridge_sysctl_fwdq_drops;
553
554 if (sysctl_createv(clog, 0, NULL, &rnode,
555 CTLFLAG_PERMANENT,
556 CTLTYPE_NODE, "interfaces",
557 SYSCTL_DESCR("Per-interface controls"),
558 NULL, 0, NULL, 0,
559 CTL_NET, CTL_CREATE, CTL_EOL) != 0)
560 goto bad;
561
562 if (sysctl_createv(clog, 0, &rnode, &rnode,
563 CTLFLAG_PERMANENT,
564 CTLTYPE_NODE, ifname,
565 SYSCTL_DESCR("Interface controls"),
566 NULL, 0, NULL, 0,
567 CTL_CREATE, CTL_EOL) != 0)
568 goto bad;
569
570 if (sysctl_createv(clog, 0, &rnode, &rnode,
571 CTLFLAG_PERMANENT,
572 CTLTYPE_NODE, "fwdq",
573 SYSCTL_DESCR("Protocol input queue controls"),
574 NULL, 0, NULL, 0,
575 CTL_CREATE, CTL_EOL) != 0)
576 goto bad;
577
578 if (sysctl_createv(clog, 0, &rnode, &cnode,
579 CTLFLAG_PERMANENT,
580 CTLTYPE_INT, "len",
581 SYSCTL_DESCR("Current forwarding queue length"),
582 len_func, 0, (void *)sc, 0,
583 CTL_CREATE, IFQCTL_LEN, CTL_EOL) != 0)
584 goto bad;
585
586 if (sysctl_createv(clog, 0, &rnode, &cnode,
587 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
588 CTLTYPE_INT, "maxlen",
589 SYSCTL_DESCR("Maximum allowed forwarding queue length"),
590 maxlen_func, 0, (void *)sc, 0,
591 CTL_CREATE, IFQCTL_MAXLEN, CTL_EOL) != 0)
592 goto bad;
593
594 if (sysctl_createv(clog, 0, &rnode, &cnode,
595 CTLFLAG_PERMANENT,
596 CTLTYPE_INT, "drops",
597 SYSCTL_DESCR("Packets dropped due to full forwarding queue"),
598 drops_func, 0, (void *)sc, 0,
599 CTL_CREATE, IFQCTL_DROPS, CTL_EOL) != 0)
600 goto bad;
601
602 return;
603 bad:
604 aprint_error("%s: could not attach sysctl nodes\n", ifname);
605 return;
606 }
607
608 /*
609 * bridge_ioctl:
610 *
611 * Handle a control request from the operator.
612 */
613 static int
614 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
615 {
616 struct bridge_softc *sc = ifp->if_softc;
617 struct lwp *l = curlwp; /* XXX */
618 union {
619 struct ifbreq ifbreq;
620 struct ifbifconf ifbifconf;
621 struct ifbareq ifbareq;
622 struct ifbaconf ifbaconf;
623 struct ifbrparam ifbrparam;
624 } args;
625 struct ifdrv *ifd = (struct ifdrv *) data;
626 const struct bridge_control *bc = NULL; /* XXXGCC */
627 int s, error = 0;
628
629 /* Authorize command before calling splnet(). */
630 switch (cmd) {
631 case SIOCGDRVSPEC:
632 case SIOCSDRVSPEC:
633 if (ifd->ifd_cmd >= bridge_control_table_size
634 || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
635 error = EINVAL;
636 return error;
637 }
638
639 /* We only care about BC_F_SUSER at this point. */
640 if ((bc->bc_flags & BC_F_SUSER) == 0)
641 break;
642
643 error = kauth_authorize_network(l->l_cred,
644 KAUTH_NETWORK_INTERFACE_BRIDGE,
645 cmd == SIOCGDRVSPEC ?
646 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
647 KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
648 ifd, NULL, NULL);
649 if (error)
650 return (error);
651
652 break;
653 }
654
655 s = splnet();
656
657 switch (cmd) {
658 case SIOCGDRVSPEC:
659 case SIOCSDRVSPEC:
660 KASSERT(bc != NULL);
661 if (cmd == SIOCGDRVSPEC &&
662 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
663 error = EINVAL;
664 break;
665 }
666 else if (cmd == SIOCSDRVSPEC &&
667 (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
668 error = EINVAL;
669 break;
670 }
671
672 /* BC_F_SUSER is checked above, before splnet(). */
673
674 if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
675 && (ifd->ifd_len != bc->bc_argsize
676 || ifd->ifd_len > sizeof(args))) {
677 error = EINVAL;
678 break;
679 }
680
681 memset(&args, 0, sizeof(args));
682 if (bc->bc_flags & BC_F_COPYIN) {
683 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
684 if (error)
685 break;
686 } else if (bc->bc_flags & BC_F_XLATEIN) {
687 args.ifbifconf.ifbic_len = ifd->ifd_len;
688 args.ifbifconf.ifbic_buf = ifd->ifd_data;
689 }
690
691 error = (*bc->bc_func)(sc, &args);
692 if (error)
693 break;
694
695 if (bc->bc_flags & BC_F_COPYOUT) {
696 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
697 } else if (bc->bc_flags & BC_F_XLATEOUT) {
698 ifd->ifd_len = args.ifbifconf.ifbic_len;
699 ifd->ifd_data = args.ifbifconf.ifbic_buf;
700 }
701 break;
702
703 case SIOCSIFFLAGS:
704 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
705 break;
706 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
707 case IFF_RUNNING:
708 /*
709 * If interface is marked down and it is running,
710 * then stop and disable it.
711 */
712 (*ifp->if_stop)(ifp, 1);
713 break;
714 case IFF_UP:
715 /*
716 * If interface is marked up and it is stopped, then
717 * start it.
718 */
719 error = (*ifp->if_init)(ifp);
720 break;
721 default:
722 break;
723 }
724 break;
725
726 case SIOCSIFMTU:
727 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
728 error = 0;
729 break;
730
731 default:
732 error = ifioctl_common(ifp, cmd, data);
733 break;
734 }
735
736 splx(s);
737
738 return (error);
739 }
740
741 /*
742 * bridge_lookup_member:
743 *
744 * Lookup a bridge member interface.
745 */
746 static struct bridge_iflist *
747 bridge_lookup_member(struct bridge_softc *sc, const char *name)
748 {
749 struct bridge_iflist *bif;
750 struct ifnet *ifp;
751 int s;
752
753 BRIDGE_PSZ_RENTER(s);
754
755 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
756 ifp = bif->bif_ifp;
757 if (strcmp(ifp->if_xname, name) == 0)
758 break;
759 }
760 bif = bridge_try_hold_bif(bif);
761
762 BRIDGE_PSZ_REXIT(s);
763
764 return bif;
765 }
766
767 /*
768 * bridge_lookup_member_if:
769 *
770 * Lookup a bridge member interface by ifnet*.
771 */
772 static struct bridge_iflist *
773 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
774 {
775 struct bridge_iflist *bif;
776 int s;
777
778 BRIDGE_PSZ_RENTER(s);
779
780 bif = member_ifp->if_bridgeif;
781 bif = bridge_try_hold_bif(bif);
782
783 BRIDGE_PSZ_REXIT(s);
784
785 return bif;
786 }
787
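/*
 * bridge_try_hold_bif:
 *
 *	Try to take a reference on a member.  Fails (returns NULL) if the
 *	member is being deleted (bif_waiting); the reference is dropped
 *	with bridge_release_member().
 */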
788 static struct bridge_iflist *
789 bridge_try_hold_bif(struct bridge_iflist *bif)
790 {
791 #ifdef BRIDGE_MPSAFE
792 if (bif != NULL) {
793 if (bif->bif_waiting)
794 bif = NULL;
795 else
796 atomic_inc_32(&bif->bif_refs);
797 }
798 #endif
799 return bif;
800 }
801
802 /*
803 * bridge_release_member:
804 *
805 * Release the specified member interface.
806 */
807 static void
808 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif)
809 {
810 #ifdef BRIDGE_MPSAFE
811 uint32_t refs;
812
813 refs = atomic_dec_uint_nv(&bif->bif_refs);
814 if (__predict_false(refs == 0 && bif->bif_waiting)) {
815 BRIDGE_INTR_LOCK(sc);
816 cv_broadcast(&sc->sc_iflist_cv);
817 BRIDGE_INTR_UNLOCK(sc);
818 }
819 #else
820 (void)sc;
821 (void)bif;
822 #endif
823 }
824
825 /*
826 * bridge_delete_member:
827 *
828 * Delete the specified member interface.
829 */
830 static void
831 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
832 {
833 struct ifnet *ifs = bif->bif_ifp;
834
835 KASSERT(BRIDGE_LOCKED(sc));
836
837 ifs->if_input = ether_input;
838 ifs->if_bridge = NULL;
839 ifs->if_bridgeif = NULL;
840
841 LIST_REMOVE(bif, bif_next);
842
843 BRIDGE_PSZ_PERFORM(sc);
844
845 BRIDGE_UNLOCK(sc);
846
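	/*
	 * Wait for any lookups that already hold a reference on this
	 * member (taken via bridge_try_hold_bif) to drop it before the
	 * member is freed.
	 */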
847 #ifdef BRIDGE_MPSAFE
848 BRIDGE_INTR_LOCK(sc);
849 bif->bif_waiting = true;
850 membar_sync();
851 while (bif->bif_refs > 0) {
852 aprint_debug("%s: cv_wait on iflist\n", __func__);
853 cv_wait(&sc->sc_iflist_cv, sc->sc_iflist_intr_lock);
854 }
855 bif->bif_waiting = false;
856 BRIDGE_INTR_UNLOCK(sc);
857 #endif
858
859 kmem_free(bif, sizeof(*bif));
860
861 BRIDGE_LOCK(sc);
862 }
863
864 static int
865 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
866 {
867 struct ifbreq *req = arg;
868 struct bridge_iflist *bif = NULL;
869 struct ifnet *ifs;
870 int error = 0;
871
872 ifs = ifunit(req->ifbr_ifsname);
873 if (ifs == NULL)
874 return (ENOENT);
875
876 if (sc->sc_if.if_mtu != ifs->if_mtu)
877 return (EINVAL);
878
879 if (ifs->if_bridge == sc)
880 return (EEXIST);
881
882 if (ifs->if_bridge != NULL)
883 return (EBUSY);
884
885 if (ifs->if_input != ether_input)
886 return EINVAL;
887
888 /* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
889 if ((ifs->if_flags & IFF_SIMPLEX) == 0)
890 return EINVAL;
891
892 bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
893
894 switch (ifs->if_type) {
895 case IFT_ETHER:
896 /*
897 * Place the interface into promiscuous mode.
898 */
899 error = ifpromisc(ifs, 1);
900 if (error)
901 goto out;
902 break;
903 default:
904 error = EINVAL;
905 goto out;
906 }
907
908 bif->bif_ifp = ifs;
909 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
910 bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
911 bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
912 bif->bif_refs = 0;
913 bif->bif_waiting = false;
914
915 BRIDGE_LOCK(sc);
916
917 ifs->if_bridge = sc;
918 ifs->if_bridgeif = bif;
919 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
920 ifs->if_input = bridge_input;
921
922 BRIDGE_UNLOCK(sc);
923
924 if (sc->sc_if.if_flags & IFF_RUNNING)
925 bstp_initialization(sc);
926 else
927 bstp_stop(sc);
928
929 out:
930 if (error) {
931 if (bif != NULL)
932 kmem_free(bif, sizeof(*bif));
933 }
934 return (error);
935 }
936
937 static int
938 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
939 {
940 struct ifbreq *req = arg;
941 const char *name = req->ifbr_ifsname;
942 struct bridge_iflist *bif;
943 struct ifnet *ifs;
944
945 BRIDGE_LOCK(sc);
946
947 /*
948 	 * Don't use bridge_lookup_member; it would take a reference on
949 	 * the member, which bridge_delete_member must wait to drain.
950 */
951 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
952 ifs = bif->bif_ifp;
953 if (strcmp(ifs->if_xname, name) == 0)
954 break;
955 }
956
957 if (bif == NULL) {
958 BRIDGE_UNLOCK(sc);
959 return ENOENT;
960 }
961
962 bridge_delete_member(sc, bif);
963
964 BRIDGE_UNLOCK(sc);
965
966 switch (ifs->if_type) {
967 case IFT_ETHER:
968 /*
969 * Take the interface out of promiscuous mode.
970 		 * Don't call it while holding a spin lock.
971 */
972 (void) ifpromisc(ifs, 0);
973 break;
974 default:
975 #ifdef DIAGNOSTIC
976 		panic("bridge_ioctl_del: impossible");
977 #endif
978 break;
979 }
980
981 bridge_rtdelete(sc, ifs);
982
983 if (sc->sc_if.if_flags & IFF_RUNNING)
984 bstp_initialization(sc);
985
986 return 0;
987 }
988
989 static int
990 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
991 {
992 struct ifbreq *req = arg;
993 struct bridge_iflist *bif;
994
995 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
996 if (bif == NULL)
997 return (ENOENT);
998
999 req->ifbr_ifsflags = bif->bif_flags;
1000 req->ifbr_state = bif->bif_state;
1001 req->ifbr_priority = bif->bif_priority;
1002 req->ifbr_path_cost = bif->bif_path_cost;
1003 req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1004
1005 bridge_release_member(sc, bif);
1006
1007 return (0);
1008 }
1009
1010 static int
1011 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1012 {
1013 struct ifbreq *req = arg;
1014 struct bridge_iflist *bif;
1015
1016 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1017 if (bif == NULL)
1018 return (ENOENT);
1019
1020 if (req->ifbr_ifsflags & IFBIF_STP) {
1021 switch (bif->bif_ifp->if_type) {
1022 case IFT_ETHER:
1023 /* These can do spanning tree. */
1024 break;
1025
1026 default:
1027 /* Nothing else can. */
1028 bridge_release_member(sc, bif);
1029 return (EINVAL);
1030 }
1031 }
1032
1033 bif->bif_flags = req->ifbr_ifsflags;
1034
1035 bridge_release_member(sc, bif);
1036
1037 if (sc->sc_if.if_flags & IFF_RUNNING)
1038 bstp_initialization(sc);
1039
1040 return (0);
1041 }
1042
1043 static int
1044 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1045 {
1046 struct ifbrparam *param = arg;
1047
1048 sc->sc_brtmax = param->ifbrp_csize;
1049 bridge_rttrim(sc);
1050
1051 return (0);
1052 }
1053
1054 static int
1055 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1056 {
1057 struct ifbrparam *param = arg;
1058
1059 param->ifbrp_csize = sc->sc_brtmax;
1060
1061 return (0);
1062 }
1063
1064 static int
1065 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1066 {
1067 struct ifbifconf *bifc = arg;
1068 struct bridge_iflist *bif;
1069 struct ifbreq *breqs;
1070 int i, count, error = 0;
1071
1072 retry:
1073 BRIDGE_LOCK(sc);
1074 count = 0;
1075 LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1076 count++;
1077 BRIDGE_UNLOCK(sc);
1078
1079 if (count == 0) {
1080 bifc->ifbic_len = 0;
1081 return 0;
1082 }
1083
1084 if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1085 		/* Tell the caller that a larger buffer is needed */
1086 bifc->ifbic_len = sizeof(*breqs) * count;
1087 return 0;
1088 }
1089
1090 breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1091
1092 BRIDGE_LOCK(sc);
1093
1094 i = 0;
1095 LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1096 i++;
1097 if (i > count) {
1098 /*
1099 		 * The number of members has increased; we need a
1100 		 * larger buffer, so retry.
1101 */
1102 BRIDGE_UNLOCK(sc);
1103 kmem_free(breqs, sizeof(*breqs) * count);
1104 goto retry;
1105 }
1106
1107 i = 0;
1108 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1109 struct ifbreq *breq = &breqs[i++];
1110 memset(breq, 0, sizeof(*breq));
1111
1112 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1113 sizeof(breq->ifbr_ifsname));
1114 breq->ifbr_ifsflags = bif->bif_flags;
1115 breq->ifbr_state = bif->bif_state;
1116 breq->ifbr_priority = bif->bif_priority;
1117 breq->ifbr_path_cost = bif->bif_path_cost;
1118 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1119 }
1120
1121 	/* Don't call copyout while holding the mutex */
1122 BRIDGE_UNLOCK(sc);
1123
1124 for (i = 0; i < count; i++) {
1125 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1126 if (error)
1127 break;
1128 }
1129 bifc->ifbic_len = sizeof(*breqs) * i;
1130
1131 kmem_free(breqs, sizeof(*breqs) * count);
1132
1133 return error;
1134 }
1135
1136 static int
1137 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1138 {
1139 struct ifbaconf *bac = arg;
1140 struct bridge_rtnode *brt;
1141 struct ifbareq bareq;
1142 int count = 0, error = 0, len;
1143
1144 if (bac->ifbac_len == 0)
1145 return (0);
1146
1147 BRIDGE_RT_INTR_LOCK(sc);
1148
1149 len = bac->ifbac_len;
1150 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1151 if (len < sizeof(bareq))
1152 goto out;
1153 memset(&bareq, 0, sizeof(bareq));
1154 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1155 sizeof(bareq.ifba_ifsname));
1156 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1157 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1158 bareq.ifba_expire = brt->brt_expire - time_uptime;
1159 } else
1160 bareq.ifba_expire = 0;
1161 bareq.ifba_flags = brt->brt_flags;
1162
1163 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1164 if (error)
1165 goto out;
1166 count++;
1167 len -= sizeof(bareq);
1168 }
1169 out:
1170 BRIDGE_RT_INTR_UNLOCK(sc);
1171
1172 bac->ifbac_len = sizeof(bareq) * count;
1173 return (error);
1174 }
1175
1176 static int
1177 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1178 {
1179 struct ifbareq *req = arg;
1180 struct bridge_iflist *bif;
1181 int error;
1182
1183 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1184 if (bif == NULL)
1185 return (ENOENT);
1186
1187 error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1188 req->ifba_flags);
1189
1190 bridge_release_member(sc, bif);
1191
1192 return (error);
1193 }
1194
1195 static int
1196 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1197 {
1198 struct ifbrparam *param = arg;
1199
1200 sc->sc_brttimeout = param->ifbrp_ctime;
1201
1202 return (0);
1203 }
1204
1205 static int
1206 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1207 {
1208 struct ifbrparam *param = arg;
1209
1210 param->ifbrp_ctime = sc->sc_brttimeout;
1211
1212 return (0);
1213 }
1214
1215 static int
1216 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1217 {
1218 struct ifbareq *req = arg;
1219
1220 return (bridge_rtdaddr(sc, req->ifba_dst));
1221 }
1222
1223 static int
1224 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1225 {
1226 struct ifbreq *req = arg;
1227
1228 bridge_rtflush(sc, req->ifbr_ifsflags);
1229
1230 return (0);
1231 }
1232
1233 static int
1234 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1235 {
1236 struct ifbrparam *param = arg;
1237
1238 param->ifbrp_prio = sc->sc_bridge_priority;
1239
1240 return (0);
1241 }
1242
1243 static int
1244 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1245 {
1246 struct ifbrparam *param = arg;
1247
1248 sc->sc_bridge_priority = param->ifbrp_prio;
1249
1250 if (sc->sc_if.if_flags & IFF_RUNNING)
1251 bstp_initialization(sc);
1252
1253 return (0);
1254 }
1255
1256 static int
1257 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1258 {
1259 struct ifbrparam *param = arg;
1260
1261 param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1262
1263 return (0);
1264 }
1265
1266 static int
1267 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1268 {
1269 struct ifbrparam *param = arg;
1270
1271 if (param->ifbrp_hellotime == 0)
1272 return (EINVAL);
1273 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1274
1275 if (sc->sc_if.if_flags & IFF_RUNNING)
1276 bstp_initialization(sc);
1277
1278 return (0);
1279 }
1280
1281 static int
1282 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1283 {
1284 struct ifbrparam *param = arg;
1285
1286 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1287
1288 return (0);
1289 }
1290
1291 static int
1292 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1293 {
1294 struct ifbrparam *param = arg;
1295
1296 if (param->ifbrp_fwddelay == 0)
1297 return (EINVAL);
1298 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1299
1300 if (sc->sc_if.if_flags & IFF_RUNNING)
1301 bstp_initialization(sc);
1302
1303 return (0);
1304 }
1305
1306 static int
1307 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1308 {
1309 struct ifbrparam *param = arg;
1310
1311 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1312
1313 return (0);
1314 }
1315
1316 static int
1317 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1318 {
1319 struct ifbrparam *param = arg;
1320
1321 if (param->ifbrp_maxage == 0)
1322 return (EINVAL);
1323 sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1324
1325 if (sc->sc_if.if_flags & IFF_RUNNING)
1326 bstp_initialization(sc);
1327
1328 return (0);
1329 }
1330
1331 static int
1332 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1333 {
1334 struct ifbreq *req = arg;
1335 struct bridge_iflist *bif;
1336
1337 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1338 if (bif == NULL)
1339 return (ENOENT);
1340
1341 bif->bif_priority = req->ifbr_priority;
1342
1343 if (sc->sc_if.if_flags & IFF_RUNNING)
1344 bstp_initialization(sc);
1345
1346 bridge_release_member(sc, bif);
1347
1348 return (0);
1349 }
1350
1351 #if defined(BRIDGE_IPF)
1352 static int
1353 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1354 {
1355 struct ifbrparam *param = arg;
1356
1357 param->ifbrp_filter = sc->sc_filter_flags;
1358
1359 return (0);
1360 }
1361
1362 static int
1363 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1364 {
1365 struct ifbrparam *param = arg;
1366 uint32_t nflags, oflags;
1367
1368 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1369 return (EINVAL);
1370
1371 nflags = param->ifbrp_filter;
1372 oflags = sc->sc_filter_flags;
1373
1374 if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1375 pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1376 sc->sc_if.if_pfil);
1377 }
1378 if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1379 pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1380 sc->sc_if.if_pfil);
1381 }
1382
1383 sc->sc_filter_flags = nflags;
1384
1385 return (0);
1386 }
1387 #endif /* BRIDGE_IPF */
1388
1389 static int
1390 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1391 {
1392 struct ifbreq *req = arg;
1393 struct bridge_iflist *bif;
1394
1395 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1396 if (bif == NULL)
1397 return (ENOENT);
1398
1399 bif->bif_path_cost = req->ifbr_path_cost;
1400
1401 if (sc->sc_if.if_flags & IFF_RUNNING)
1402 bstp_initialization(sc);
1403
1404 bridge_release_member(sc, bif);
1405
1406 return (0);
1407 }
1408
1409 /*
1410 * bridge_ifdetach:
1411 *
1412 * Detach an interface from a bridge. Called when a member
1413 * interface is detaching.
1414 */
1415 void
1416 bridge_ifdetach(struct ifnet *ifp)
1417 {
1418 struct bridge_softc *sc = ifp->if_bridge;
1419 struct ifbreq breq;
1420
1421 /* ioctl_lock should prevent this from happening */
1422 KASSERT(sc != NULL);
1423
1424 memset(&breq, 0, sizeof(breq));
1425 strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
1426
1427 (void) bridge_ioctl_del(sc, &breq);
1428 }
1429
1430 /*
1431 * bridge_init:
1432 *
1433 * Initialize a bridge interface.
1434 */
1435 static int
1436 bridge_init(struct ifnet *ifp)
1437 {
1438 struct bridge_softc *sc = ifp->if_softc;
1439
1440 if (ifp->if_flags & IFF_RUNNING)
1441 return (0);
1442
1443 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1444 bridge_timer, sc);
1445
1446 ifp->if_flags |= IFF_RUNNING;
1447 bstp_initialization(sc);
1448 return (0);
1449 }
1450
1451 /*
1452 * bridge_stop:
1453 *
1454 * Stop the bridge interface.
1455 */
1456 static void
1457 bridge_stop(struct ifnet *ifp, int disable)
1458 {
1459 struct bridge_softc *sc = ifp->if_softc;
1460
1461 if ((ifp->if_flags & IFF_RUNNING) == 0)
1462 return;
1463
1464 callout_stop(&sc->sc_brcallout);
1465 bstp_stop(sc);
1466
1467 bridge_rtflush(sc, IFBF_FLUSHDYN);
1468
1469 ifp->if_flags &= ~IFF_RUNNING;
1470 }
1471
1472 /*
1473 * bridge_enqueue:
1474 *
1475 * Enqueue a packet on a bridge member interface.
1476 */
1477 void
1478 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1479 int runfilt)
1480 {
1481 ALTQ_DECL(struct altq_pktattr pktattr;)
1482 int len, error;
1483 short mflags;
1484
1485 /*
1486 * Clear any in-bound checksum flags for this packet.
1487 */
1488 m->m_pkthdr.csum_flags = 0;
1489
1490 if (runfilt) {
1491 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1492 dst_ifp, PFIL_OUT) != 0) {
1493 if (m != NULL)
1494 m_freem(m);
1495 return;
1496 }
1497 if (m == NULL)
1498 return;
1499 }
1500
1501 #ifdef ALTQ
1502 /*
1503 * If ALTQ is enabled on the member interface, do
1504 * classification; the queueing discipline might
1505 * not require classification, but might require
1506 * the address family/header pointer in the pktattr.
1507 */
1508 if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1509 /* XXX IFT_ETHER */
1510 altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
1511 }
1512 #endif /* ALTQ */
1513
1514 len = m->m_pkthdr.len;
1515 mflags = m->m_flags;
1516
1517 IFQ_ENQUEUE(&dst_ifp->if_snd, m, &pktattr, error);
1518
1519 if (error) {
1520 /* mbuf is already freed */
1521 sc->sc_if.if_oerrors++;
1522 return;
1523 }
1524
1525 sc->sc_if.if_opackets++;
1526 sc->sc_if.if_obytes += len;
1527
1528 dst_ifp->if_obytes += len;
1529
1530 if (mflags & M_MCAST) {
1531 sc->sc_if.if_omcasts++;
1532 dst_ifp->if_omcasts++;
1533 }
1534
1535 if ((dst_ifp->if_flags & IFF_OACTIVE) == 0)
1536 (*dst_ifp->if_start)(dst_ifp);
1537 }
1538
1539 /*
1540 * bridge_output:
1541 *
1542 * Send output from a bridge member interface. This
1543 * performs the bridging function for locally originated
1544 * packets.
1545 *
1546 * The mbuf has the Ethernet header already attached. We must
1547 * enqueue or free the mbuf before returning.
1548 */
1549 int
1550 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1551 struct rtentry *rt)
1552 {
1553 struct ether_header *eh;
1554 struct ifnet *dst_if;
1555 struct bridge_softc *sc;
1556 #ifndef BRIDGE_MPSAFE
1557 int s;
1558 #endif
1559
1560 if (m->m_len < ETHER_HDR_LEN) {
1561 m = m_pullup(m, ETHER_HDR_LEN);
1562 if (m == NULL)
1563 return (0);
1564 }
1565
1566 eh = mtod(m, struct ether_header *);
1567 sc = ifp->if_bridge;
1568
1569 #ifndef BRIDGE_MPSAFE
1570 s = splnet();
1571 #endif
1572
1573 /*
1574 * If bridge is down, but the original output interface is up,
1575 * go ahead and send out that interface. Otherwise, the packet
1576 * is dropped below.
1577 */
1578 if (__predict_false(sc == NULL) ||
1579 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1580 dst_if = ifp;
1581 goto sendunicast;
1582 }
1583
1584 /*
1585 * If the packet is a multicast, or we don't know a better way to
1586 * get there, send to all interfaces.
1587 */
1588 if (ETHER_IS_MULTICAST(eh->ether_dhost))
1589 dst_if = NULL;
1590 else
1591 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1592 if (dst_if == NULL) {
1593 struct bridge_iflist *bif;
1594 struct mbuf *mc;
1595 int used = 0;
1596 int ss;
1597
1598 BRIDGE_PSZ_RENTER(ss);
1599 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1600 bif = bridge_try_hold_bif(bif);
1601 if (bif == NULL)
1602 continue;
1603 BRIDGE_PSZ_REXIT(ss);
1604
1605 dst_if = bif->bif_ifp;
1606 if ((dst_if->if_flags & IFF_RUNNING) == 0)
1607 goto next;
1608
1609 /*
1610 * If this is not the original output interface,
1611 * and the interface is participating in spanning
1612 * tree, make sure the port is in a state that
1613 * allows forwarding.
1614 */
1615 if (dst_if != ifp &&
1616 (bif->bif_flags & IFBIF_STP) != 0) {
1617 switch (bif->bif_state) {
1618 case BSTP_IFSTATE_BLOCKING:
1619 case BSTP_IFSTATE_LISTENING:
1620 case BSTP_IFSTATE_DISABLED:
1621 goto next;
1622 }
1623 }
1624
1625 if (LIST_NEXT(bif, bif_next) == NULL) {
1626 used = 1;
1627 mc = m;
1628 } else {
1629 mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
1630 if (mc == NULL) {
1631 sc->sc_if.if_oerrors++;
1632 goto next;
1633 }
1634 }
1635
1636 bridge_enqueue(sc, dst_if, mc, 0);
1637 next:
1638 bridge_release_member(sc, bif);
1639 BRIDGE_PSZ_RENTER(ss);
1640 }
1641 BRIDGE_PSZ_REXIT(ss);
1642
1643 if (used == 0)
1644 m_freem(m);
1645 #ifndef BRIDGE_MPSAFE
1646 splx(s);
1647 #endif
1648 return (0);
1649 }
1650
1651 sendunicast:
1652 /*
1653 * XXX Spanning tree consideration here?
1654 */
1655
1656 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1657 m_freem(m);
1658 #ifndef BRIDGE_MPSAFE
1659 splx(s);
1660 #endif
1661 return (0);
1662 }
1663
1664 bridge_enqueue(sc, dst_if, m, 0);
1665
1666 #ifndef BRIDGE_MPSAFE
1667 splx(s);
1668 #endif
1669 return (0);
1670 }
1671
1672 /*
1673 * bridge_start:
1674 *
1675 * Start output on a bridge.
1676 *
1677 * NOTE: This routine should never be called in this implementation.
1678 */
1679 static void
1680 bridge_start(struct ifnet *ifp)
1681 {
1682
1683 printf("%s: bridge_start() called\n", ifp->if_xname);
1684 }
1685
1686 /*
1687 * bridge_forward:
1688 *
1689 * The forwarding function of the bridge.
1690 */
1691 static void
1692 bridge_forward(void *v)
1693 {
1694 struct bridge_softc *sc = v;
1695 struct mbuf *m;
1696 struct bridge_iflist *bif;
1697 struct ifnet *src_if, *dst_if;
1698 struct ether_header *eh;
1699 #ifndef BRIDGE_MPSAFE
1700 int s;
1701
1702 KERNEL_LOCK(1, NULL);
1703 mutex_enter(softnet_lock);
1704 #endif
1705
1706 if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1707 #ifndef BRIDGE_MPSAFE
1708 mutex_exit(softnet_lock);
1709 KERNEL_UNLOCK_ONE(NULL);
1710 #endif
1711 return;
1712 }
1713
1714 #ifndef BRIDGE_MPSAFE
1715 s = splnet();
1716 #endif
1717 while ((m = pktq_dequeue(sc->sc_fwd_pktq)) != NULL) {
1718 src_if = m->m_pkthdr.rcvif;
1719
1720 sc->sc_if.if_ipackets++;
1721 sc->sc_if.if_ibytes += m->m_pkthdr.len;
1722
1723 /*
1724 * Look up the bridge_iflist.
1725 */
1726 bif = bridge_lookup_member_if(sc, src_if);
1727 if (bif == NULL) {
1728 /* Interface is not a bridge member (anymore?) */
1729 m_freem(m);
1730 continue;
1731 }
1732
1733 if (bif->bif_flags & IFBIF_STP) {
1734 switch (bif->bif_state) {
1735 case BSTP_IFSTATE_BLOCKING:
1736 case BSTP_IFSTATE_LISTENING:
1737 case BSTP_IFSTATE_DISABLED:
1738 m_freem(m);
1739 bridge_release_member(sc, bif);
1740 continue;
1741 }
1742 }
1743
1744 eh = mtod(m, struct ether_header *);
1745
1746 /*
1747 * If the interface is learning, and the source
1748 * address is valid and not multicast, record
1749 * the address.
1750 */
1751 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1752 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1753 (eh->ether_shost[0] == 0 &&
1754 eh->ether_shost[1] == 0 &&
1755 eh->ether_shost[2] == 0 &&
1756 eh->ether_shost[3] == 0 &&
1757 eh->ether_shost[4] == 0 &&
1758 eh->ether_shost[5] == 0) == 0) {
1759 (void) bridge_rtupdate(sc, eh->ether_shost,
1760 src_if, 0, IFBAF_DYNAMIC);
1761 }
1762
1763 if ((bif->bif_flags & IFBIF_STP) != 0 &&
1764 bif->bif_state == BSTP_IFSTATE_LEARNING) {
1765 m_freem(m);
1766 bridge_release_member(sc, bif);
1767 continue;
1768 }
1769
1770 bridge_release_member(sc, bif);
1771
1772 /*
1773 * At this point, the port either doesn't participate
1774 * in spanning tree or it is in the forwarding state.
1775 */
1776
1777 /*
1778 * If the packet is unicast, destined for someone on
1779 * "this" side of the bridge, drop it.
1780 */
1781 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1782 dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1783 if (src_if == dst_if) {
1784 m_freem(m);
1785 continue;
1786 }
1787 } else {
1788 /* ...forward it to all interfaces. */
1789 sc->sc_if.if_imcasts++;
1790 dst_if = NULL;
1791 }
1792
1793 if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1794 m->m_pkthdr.rcvif, PFIL_IN) != 0) {
1795 if (m != NULL)
1796 m_freem(m);
1797 continue;
1798 }
1799 if (m == NULL)
1800 continue;
1801
1802 if (dst_if == NULL) {
1803 bridge_broadcast(sc, src_if, m);
1804 continue;
1805 }
1806
1807 /*
1808 * At this point, we're dealing with a unicast frame
1809 * going to a different interface.
1810 */
1811 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1812 m_freem(m);
1813 continue;
1814 }
1815
1816 bif = bridge_lookup_member_if(sc, dst_if);
1817 if (bif == NULL) {
1818 /* Not a member of the bridge (anymore?) */
1819 m_freem(m);
1820 continue;
1821 }
1822
1823 if (bif->bif_flags & IFBIF_STP) {
1824 switch (bif->bif_state) {
1825 case BSTP_IFSTATE_DISABLED:
1826 case BSTP_IFSTATE_BLOCKING:
1827 m_freem(m);
1828 bridge_release_member(sc, bif);
1829 continue;
1830 }
1831 }
1832
1833 bridge_release_member(sc, bif);
1834
1835 bridge_enqueue(sc, dst_if, m, 1);
1836 }
1837 #ifndef BRIDGE_MPSAFE
1838 splx(s);
1839 mutex_exit(softnet_lock);
1840 KERNEL_UNLOCK_ONE(NULL);
1841 #endif
1842 }
1843
1844 static bool
1845 bstp_state_before_learning(struct bridge_iflist *bif)
1846 {
1847 if (bif->bif_flags & IFBIF_STP) {
1848 switch (bif->bif_state) {
1849 case BSTP_IFSTATE_BLOCKING:
1850 case BSTP_IFSTATE_LISTENING:
1851 case BSTP_IFSTATE_DISABLED:
1852 return true;
1853 }
1854 }
1855 return false;
1856 }
1857
1858 static bool
1859 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1860 {
1861 uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1862
1863 if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1864 #if NCARP > 0
1865 || (bif->bif_ifp->if_carp &&
1866 carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1867 #endif /* NCARP > 0 */
1868 )
1869 return true;
1870
1871 return false;
1872 }
1873
1874 /*
1875 * bridge_input:
1876 *
1877 * Receive input from a member interface. Queue the packet for
1878 * bridging if it is not for us.
1879 */
1880 static void
1881 bridge_input(struct ifnet *ifp, struct mbuf *m)
1882 {
1883 struct bridge_softc *sc = ifp->if_bridge;
1884 struct bridge_iflist *bif;
1885 struct ether_header *eh;
1886
1887 if (__predict_false(sc == NULL) ||
1888 (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1889 ether_input(ifp, m);
1890 return;
1891 }
1892
1893 bif = bridge_lookup_member_if(sc, ifp);
1894 if (bif == NULL) {
1895 ether_input(ifp, m);
1896 return;
1897 }
1898
1899 eh = mtod(m, struct ether_header *);
1900
1901 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1902 if (memcmp(etherbroadcastaddr,
1903 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1904 m->m_flags |= M_BCAST;
1905 else
1906 m->m_flags |= M_MCAST;
1907 }
1908
1909 /*
1910 * A 'fast' path for packets addressed to interfaces that are
1911 * part of this bridge.
1912 */
1913 if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
1914 !bstp_state_before_learning(bif)) {
1915 struct bridge_iflist *_bif;
1916 struct ifnet *_ifp = NULL;
1917 int s;
1918
1919 BRIDGE_PSZ_RENTER(s);
1920 LIST_FOREACH(_bif, &sc->sc_iflist, bif_next) {
1921 /* It is destined for us. */
1922 if (bridge_ourether(_bif, eh, 0)) {
1923 _bif = bridge_try_hold_bif(_bif);
1924 BRIDGE_PSZ_REXIT(s);
1925 if (_bif == NULL)
1926 goto out;
1927 if (_bif->bif_flags & IFBIF_LEARNING)
1928 (void) bridge_rtupdate(sc,
1929 eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
1930 _ifp = m->m_pkthdr.rcvif = _bif->bif_ifp;
1931 bridge_release_member(sc, _bif);
1932 goto out;
1933 }
1934
1935 /* We just received a packet that we sent out. */
1936 if (bridge_ourether(_bif, eh, 1))
1937 break;
1938 }
1939 BRIDGE_PSZ_REXIT(s);
1940 out:
1941
1942 if (_bif != NULL) {
1943 bridge_release_member(sc, bif);
1944 if (_ifp != NULL) {
1945 m->m_flags &= ~M_PROMISC;
1946 ether_input(_ifp, m);
1947 } else
1948 m_freem(m);
1949 return;
1950 }
1951 }
1952
1953 /* Tap off 802.1D packets; they do not get forwarded. */
1954 if (bif->bif_flags & IFBIF_STP &&
1955 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
1956 bstp_input(sc, bif, m);
1957 bridge_release_member(sc, bif);
1958 return;
1959 }
1960
1961 /*
1962 * A normal switch would discard the packet here, but that's not what
1963 * we've done historically. This also prevents some obnoxious behaviour.
1964 */
1965 if (bstp_state_before_learning(bif)) {
1966 bridge_release_member(sc, bif);
1967 ether_input(ifp, m);
1968 return;
1969 }
1970
1971 bridge_release_member(sc, bif);
1972
1973 /* Queue the packet for bridge forwarding. */
1974 if (__predict_false(!pktq_enqueue(sc->sc_fwd_pktq, m, 0)))
1975 m_freem(m);
1976 }
1977
1978 /*
1979 * bridge_broadcast:
1980 *
1981 * Send a frame to all interfaces that are members of
1982 * the bridge, except for the one on which the packet
1983 * arrived.
1984 */
1985 static void
1986 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
1987 struct mbuf *m)
1988 {
1989 struct bridge_iflist *bif;
1990 struct mbuf *mc;
1991 struct ifnet *dst_if;
1992 bool bmcast;
1993 int s;
1994
1995 bmcast = m->m_flags & (M_BCAST|M_MCAST);
1996
1997 BRIDGE_PSZ_RENTER(s);
1998 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1999 bif = bridge_try_hold_bif(bif);
2000 if (bif == NULL)
2001 continue;
2002 BRIDGE_PSZ_REXIT(s);
2003
2004 dst_if = bif->bif_ifp;
2005
2006 if (bif->bif_flags & IFBIF_STP) {
2007 switch (bif->bif_state) {
2008 case BSTP_IFSTATE_BLOCKING:
2009 case BSTP_IFSTATE_DISABLED:
2010 goto next;
2011 }
2012 }
2013
2014 if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2015 goto next;
2016
2017 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2018 goto next;
2019
2020 if (dst_if != src_if) {
2021 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
2022 if (mc == NULL) {
2023 sc->sc_if.if_oerrors++;
2024 goto next;
2025 }
2026 bridge_enqueue(sc, dst_if, mc, 1);
2027 }
2028
2029 if (bmcast) {
2030 mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
2031 if (mc == NULL) {
2032 sc->sc_if.if_oerrors++;
2033 goto next;
2034 }
2035
2036 mc->m_pkthdr.rcvif = dst_if;
2037 mc->m_flags &= ~M_PROMISC;
2038 ether_input(dst_if, mc);
2039 }
2040 next:
2041 bridge_release_member(sc, bif);
2042 BRIDGE_PSZ_RENTER(s);
2043 }
2044 BRIDGE_PSZ_REXIT(s);
2045
2046 m_freem(m);
2047 }
2048
2049 static int
2050 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2051 struct bridge_rtnode **brtp)
2052 {
2053 struct bridge_rtnode *brt;
2054 int error;
2055
2056 if (sc->sc_brtcnt >= sc->sc_brtmax)
2057 return ENOSPC;
2058
2059 /*
2060 * Allocate a new bridge forwarding node, and
2061 * initialize the expiration time and Ethernet
2062 * address.
2063 */
2064 brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2065 if (brt == NULL)
2066 return ENOMEM;
2067
2068 memset(brt, 0, sizeof(*brt));
2069 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2070 brt->brt_flags = IFBAF_DYNAMIC;
2071 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2072
2073 BRIDGE_RT_INTR_LOCK(sc);
2074 error = bridge_rtnode_insert(sc, brt);
2075 BRIDGE_RT_INTR_UNLOCK(sc);
2076
2077 if (error != 0) {
2078 pool_put(&bridge_rtnode_pool, brt);
2079 return error;
2080 }
2081
2082 *brtp = brt;
2083 return 0;
2084 }
2085
2086 /*
2087 * bridge_rtupdate:
2088 *
2089 * Add a bridge routing entry.
2090 */
2091 static int
2092 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2093 struct ifnet *dst_if, int setflags, uint8_t flags)
2094 {
2095 struct bridge_rtnode *brt;
2096 int s;
2097
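	/*
	 * Lock-less update: look up and update the entry inside a read
	 * section; if it does not exist yet, allocate and insert it
	 * under the rtlist lock and retry, so the fields are always
	 * set through the same code path.
	 */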
2098 again:
2099 /*
2100 * A route for this destination might already exist. If so,
2101 * update it, otherwise create a new one.
2102 */
2103 BRIDGE_RT_RENTER(s);
2104 brt = bridge_rtnode_lookup(sc, dst);
2105
2106 if (brt != NULL) {
2107 brt->brt_ifp = dst_if;
2108 if (setflags) {
2109 brt->brt_flags = flags;
2110 if (flags & IFBAF_STATIC)
2111 brt->brt_expire = 0;
2112 else
2113 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2114 } else {
2115 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2116 brt->brt_expire = time_uptime + sc->sc_brttimeout;
2117 }
2118 }
2119 BRIDGE_RT_REXIT(s);
2120
2121 if (brt == NULL) {
2122 int r;
2123
2124 r = bridge_rtalloc(sc, dst, &brt);
2125 if (r != 0)
2126 return r;
2127 goto again;
2128 }
2129
2130 return 0;
2131 }
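#if 0
/*
 * Illustrative sketch (not compiled): bridge_rtupdate() is how the
 * learning path records where a source address was last seen.  The real
 * callers live in the input/forwarding code; the helper below only
 * demonstrates the intended use and is an example, not a copy of that
 * code.
 */
static void
bridge_learn_example(struct bridge_softc *sc, struct bridge_iflist *bif,
    const struct ether_header *eh)
{

	if (bif->bif_flags & IFBIF_LEARNING)
		(void)bridge_rtupdate(sc, eh->ether_shost,
		    bif->bif_ifp, 0, IFBAF_DYNAMIC);
}
#endif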
2132
2133 /*
2134 * bridge_rtlookup:
2135 *
2136 * Lookup the destination interface for an address.
2137 */
2138 static struct ifnet *
2139 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2140 {
2141 struct bridge_rtnode *brt;
2142 struct ifnet *ifs = NULL;
2143 int s;
2144
2145 BRIDGE_RT_RENTER(s);
2146 brt = bridge_rtnode_lookup(sc, addr);
2147 if (brt != NULL)
2148 ifs = brt->brt_ifp;
2149 BRIDGE_RT_REXIT(s);
2150
2151 return ifs;
2152 }
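#if 0
/*
 * Illustrative sketch (not compiled): the forwarding path uses
 * bridge_rtlookup() to decide between unicast forwarding and flooding.
 * The real decision, including the STP and IFF_RUNNING checks, is made
 * in bridge_forward(); this stripped-down helper is only an example of
 * the intended use.
 */
static void
bridge_forward_example(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m, const struct ether_header *eh)
{
	struct ifnet *dst_if;

	dst_if = bridge_rtlookup(sc, eh->ether_dhost);
	if (dst_if == NULL) {
		/* Unknown destination: flood to all members but src_if. */
		bridge_broadcast(sc, src_if, m);
		return;
	}
	bridge_enqueue(sc, dst_if, m, 1);
}
#endif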
2153
2154 typedef bool (*bridge_iterate_cb_t)
2155 (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2156
2157 /*
2158 * bridge_rtlist_iterate_remove:
2159 *
2160  *	Iterate over sc->sc_rtlist and remove every rtnode for which the
2161  *	func callback returns true.  Removal follows the pserialize pattern:
2162  *	nodes are unlinked under the locks, readers are drained, and only
2163  *	then are the nodes destroyed, so all kmem_* calls stay outside the mutexes.
2164 */
2165 static void
2166 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2167 {
2168 struct bridge_rtnode *brt, *nbrt;
2169 struct bridge_rtnode **brt_list;
2170 int i, count;
2171
2172 retry:
2173 count = sc->sc_brtcnt;
2174 if (count == 0)
2175 return;
2176 brt_list = kmem_alloc(sizeof(struct bridge_rtnode *) * count, KM_SLEEP);
2177
2178 BRIDGE_RT_LOCK(sc);
2179 BRIDGE_RT_INTR_LOCK(sc);
2180 if (__predict_false(sc->sc_brtcnt > count)) {
2181 		/* The number of rtnodes grew; we need a larger array */
2182 BRIDGE_RT_INTR_UNLOCK(sc);
2183 BRIDGE_RT_UNLOCK(sc);
2184 kmem_free(brt_list, sizeof(*brt_list) * count);
2185 goto retry;
2186 }
2187
2188 i = 0;
2189 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2190 bool need_break = false;
2191 if (func(sc, brt, &need_break, arg)) {
2192 bridge_rtnode_remove(sc, brt);
2193 brt_list[i++] = brt;
2194 }
2195 if (need_break)
2196 break;
2197 }
2198 BRIDGE_RT_INTR_UNLOCK(sc);
2199
2200 if (i > 0)
2201 BRIDGE_RT_PSZ_PERFORM(sc);
2202 BRIDGE_RT_UNLOCK(sc);
2203
2204 while (--i >= 0)
2205 bridge_rtnode_destroy(brt_list[i]);
2206
2207 kmem_free(brt_list, sizeof(*brt_list) * count);
2208 }
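#if 0
/*
 * Illustrative sketch (not compiled): callbacks passed to
 * bridge_rtlist_iterate_remove() return true for entries that should be
 * removed and may set *need_break to stop the walk early.  A
 * hypothetical callback that would drop every entry learned on a given
 * member interface could look like this (the arg pointer carries the
 * interface):
 */
static bool
bridge_rtdelete_all_example_cb(struct bridge_softc *sc,
    struct bridge_rtnode *brt, bool *need_break, void *arg)
{
	const struct ifnet *ifp = arg;

	return brt->brt_ifp == ifp;
}
#endif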
2209
2210 static bool
2211 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2212 bool *need_break, void *arg)
2213 {
2214 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2215 		/* Account for the removal we are about to perform */
2216 if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2217 *need_break = true;
2218 return true;
2219 } else
2220 return false;
2221 }
2222
2223 static void
2224 bridge_rttrim0(struct bridge_softc *sc)
2225 {
2226 bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2227 }
2228
2229 /*
2230 * bridge_rttrim:
2231 *
2232  *	Trim the routing table so that the number of
2233  *	routing entries is less than or equal to the
2234  *	configured maximum.
2235 */
2236 static void
2237 bridge_rttrim(struct bridge_softc *sc)
2238 {
2239
2240 /* Make sure we actually need to do this. */
2241 if (sc->sc_brtcnt <= sc->sc_brtmax)
2242 return;
2243
2244 /* Force an aging cycle; this might trim enough addresses. */
2245 bridge_rtage(sc);
2246 if (sc->sc_brtcnt <= sc->sc_brtmax)
2247 return;
2248
2249 bridge_rttrim0(sc);
2250
2251 return;
2252 }
2253
2254 /*
2255 * bridge_timer:
2256 *
2257 * Aging timer for the bridge.
2258 */
2259 static void
2260 bridge_timer(void *arg)
2261 {
2262 struct bridge_softc *sc = arg;
2263
2264 workqueue_enqueue(sc->sc_rtage_wq, &bridge_rtage_wk, NULL);
2265 }
2266
2267 static void
2268 bridge_rtage_work(struct work *wk, void *arg)
2269 {
2270 struct bridge_softc *sc = arg;
2271
2272 KASSERT(wk == &bridge_rtage_wk);
2273
2274 bridge_rtage(sc);
2275
2276 if (sc->sc_if.if_flags & IFF_RUNNING)
2277 callout_reset(&sc->sc_brcallout,
2278 bridge_rtable_prune_period * hz, bridge_timer, sc);
2279 }
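#if 0
/*
 * Illustrative sketch (not compiled): the callout fires in softint
 * context and only enqueues the work, so the route-list locks are taken
 * exclusively in thread context by bridge_rtage_work().  The workqueue
 * itself is created at attach time; the flags below are an assumption
 * based on the workqueue(9) interface, not copied from the attach code.
 */
static int
bridge_rtage_wq_create_example(struct bridge_softc *sc)
{

	return workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
	    bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
}
#endif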
2280
2281 static bool
2282 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2283 bool *need_break, void *arg)
2284 {
2285 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2286 time_uptime >= brt->brt_expire)
2287 return true;
2288 else
2289 return false;
2290 }
2291
2292 /*
2293 * bridge_rtage:
2294 *
2295 * Perform an aging cycle.
2296 */
2297 static void
2298 bridge_rtage(struct bridge_softc *sc)
2299 {
2300 bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2301 }
2302
2303
2304 static bool
2305 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2306 bool *need_break, void *arg)
2307 {
2308 int full = *(int*)arg;
2309
2310 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2311 return true;
2312 else
2313 return false;
2314 }
2315
2316 /*
2317 * bridge_rtflush:
2318 *
2319 * Remove all dynamic addresses from the bridge.
2320 */
2321 static void
2322 bridge_rtflush(struct bridge_softc *sc, int full)
2323 {
2324 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2325 }
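#if 0
/*
 * Illustrative sketch (not compiled): a non-zero "full" argument also
 * removes static entries.  The BRDGFLUSH ioctl handler earlier in this
 * file drives the flush in roughly this way; the struct ifbreq and
 * IFBF_FLUSHALL names come from if_bridgevar.h and are shown here only
 * as an example of the intended use.
 */
static int
bridge_flush_example(struct bridge_softc *sc, const struct ifbreq *req)
{

	bridge_rtflush(sc, (req->ifbr_ifsflags & IFBF_FLUSHALL) ? 1 : 0);
	return 0;
}
#endif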
2326
2327 /*
2328 * bridge_rtdaddr:
2329 *
2330 * Remove an address from the table.
2331 */
2332 static int
2333 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2334 {
2335 struct bridge_rtnode *brt;
2336
2337 BRIDGE_RT_LOCK(sc);
2338 BRIDGE_RT_INTR_LOCK(sc);
2339 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2340 BRIDGE_RT_INTR_UNLOCK(sc);
2341 BRIDGE_RT_UNLOCK(sc);
2342 return ENOENT;
2343 }
2344 bridge_rtnode_remove(sc, brt);
2345 BRIDGE_RT_INTR_UNLOCK(sc);
2346 BRIDGE_RT_PSZ_PERFORM(sc);
2347 BRIDGE_RT_UNLOCK(sc);
2348
2349 bridge_rtnode_destroy(brt);
2350
2351 return 0;
2352 }
2353
2354 /*
2355 * bridge_rtdelete:
2356 *
2357  *	Delete routes to a specific member interface.
2358 */
2359 static void
2360 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2361 {
2362 struct bridge_rtnode *brt, *nbrt;
2363
2364 BRIDGE_RT_LOCK(sc);
2365 BRIDGE_RT_INTR_LOCK(sc);
2366 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2367 if (brt->brt_ifp == ifp)
2368 break;
2369 }
2370 if (brt == NULL) {
2371 BRIDGE_RT_INTR_UNLOCK(sc);
2372 BRIDGE_RT_UNLOCK(sc);
2373 return;
2374 }
2375 bridge_rtnode_remove(sc, brt);
2376 BRIDGE_RT_INTR_UNLOCK(sc);
2377 BRIDGE_RT_PSZ_PERFORM(sc);
2378 BRIDGE_RT_UNLOCK(sc);
2379
2380 bridge_rtnode_destroy(brt);
2381 }
2382
2383 /*
2384 * bridge_rtable_init:
2385 *
2386 * Initialize the route table for this bridge.
2387 */
2388 static void
2389 bridge_rtable_init(struct bridge_softc *sc)
2390 {
2391 int i;
2392
2393 sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2394 KM_SLEEP);
2395
2396 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2397 LIST_INIT(&sc->sc_rthash[i]);
2398
2399 sc->sc_rthash_key = cprng_fast32();
2400
2401 LIST_INIT(&sc->sc_rtlist);
2402
2403 sc->sc_rtlist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2404 #ifdef BRIDGE_MPSAFE
2405 sc->sc_rtlist_psz = pserialize_create();
2406 sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2407 #else
2408 sc->sc_rtlist_psz = NULL;
2409 sc->sc_rtlist_lock = NULL;
2410 #endif
2411 }
2412
2413 /*
2414 * bridge_rtable_fini:
2415 *
2416 * Deconstruct the route table for this bridge.
2417 */
2418 static void
2419 bridge_rtable_fini(struct bridge_softc *sc)
2420 {
2421
2422 kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2423 if (sc->sc_rtlist_intr_lock)
2424 mutex_obj_free(sc->sc_rtlist_intr_lock);
2425 if (sc->sc_rtlist_lock)
2426 mutex_obj_free(sc->sc_rtlist_lock);
2427 if (sc->sc_rtlist_psz)
2428 pserialize_destroy(sc->sc_rtlist_psz);
2429 }
2430
2431 /*
2432 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2433  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2434 */
2435 #define mix(a, b, c) \
2436 do { \
2437 a -= b; a -= c; a ^= (c >> 13); \
2438 b -= c; b -= a; b ^= (a << 8); \
2439 c -= a; c -= b; c ^= (b >> 13); \
2440 a -= b; a -= c; a ^= (c >> 12); \
2441 b -= c; b -= a; b ^= (a << 16); \
2442 c -= a; c -= b; c ^= (b >> 5); \
2443 a -= b; a -= c; a ^= (c >> 3); \
2444 b -= c; b -= a; b ^= (a << 10); \
2445 c -= a; c -= b; c ^= (b >> 15); \
2446 } while (/*CONSTCOND*/0)
2447
2448 static inline uint32_t
2449 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2450 {
2451 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2452
2453 b += addr[5] << 8;
2454 b += addr[4];
2455 a += addr[3] << 24;
2456 a += addr[2] << 16;
2457 a += addr[1] << 8;
2458 a += addr[0];
2459
2460 mix(a, b, c);
2461
2462 return (c & BRIDGE_RTHASH_MASK);
2463 }
2464
2465 #undef mix
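#if 0
/*
 * Illustrative sketch (not compiled): the hash is salted with the
 * per-bridge sc_rthash_key chosen in bridge_rtable_init(), so the bucket
 * selected for a given address is only meaningful within one bridge and
 * cannot be predicted from the address alone.  Consumers always go
 * through bridge_rthash() to reach a chain, e.g.:
 */
static struct bridge_rtnode *
bridge_rthash_first_example(struct bridge_softc *sc, const uint8_t *addr)
{
	uint32_t hash = bridge_rthash(sc, addr);

	return LIST_FIRST(&sc->sc_rthash[hash]);
}
#endif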
2466
2467 /*
2468 * bridge_rtnode_lookup:
2469 *
2470 * Look up a bridge route node for the specified destination.
2471 */
2472 static struct bridge_rtnode *
2473 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2474 {
2475 struct bridge_rtnode *brt;
2476 uint32_t hash;
2477 int dir;
2478
2479 hash = bridge_rthash(sc, addr);
2480 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2481 dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2482 if (dir == 0)
2483 return (brt);
2484 if (dir > 0)
2485 return (NULL);
2486 }
2487
2488 return (NULL);
2489 }
2490
2491 /*
2492 * bridge_rtnode_insert:
2493 *
2494  *	Insert the specified bridge node into the route table.  The entry
2495  *	is not expected to exist already; if it does, EEXIST is returned.
2496 */
2497 static int
2498 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2499 {
2500 struct bridge_rtnode *lbrt;
2501 uint32_t hash;
2502 int dir;
2503
2504 KASSERT(BRIDGE_RT_INTR_LOCKED(sc));
2505
2506 hash = bridge_rthash(sc, brt->brt_addr);
2507
2508 lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2509 if (lbrt == NULL) {
2510 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2511 goto out;
2512 }
2513
2514 do {
2515 dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2516 if (dir == 0)
2517 return (EEXIST);
2518 if (dir > 0) {
2519 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2520 goto out;
2521 }
2522 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2523 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2524 goto out;
2525 }
2526 lbrt = LIST_NEXT(lbrt, brt_hash);
2527 } while (lbrt != NULL);
2528
2529 #ifdef DIAGNOSTIC
2530 panic("bridge_rtnode_insert: impossible");
2531 #endif
2532
2533 out:
2534 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2535 sc->sc_brtcnt++;
2536
2537 return (0);
2538 }
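#if 0
/*
 * Illustrative sketch (not compiled): the insertion above keeps every
 * hash chain sorted in descending memcmp() order of the address, which
 * is what allows bridge_rtnode_lookup() to stop as soon as the searched
 * address compares greater than the current entry.  A hypothetical
 * DIAGNOSTIC-style check of that invariant could look like this:
 */
static void
bridge_rthash_check_sorted_example(struct bridge_softc *sc, uint32_t hash)
{
	struct bridge_rtnode *brt, *next;

	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
		next = LIST_NEXT(brt, brt_hash);
		if (next != NULL)
			KASSERT(memcmp(brt->brt_addr, next->brt_addr,
			    ETHER_ADDR_LEN) > 0);
	}
}
#endif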
2539
2540 /*
2541 * bridge_rtnode_remove:
2542 *
2543 * Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2544 */
2545 static void
2546 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2547 {
2548
2549 KASSERT(BRIDGE_RT_INTR_LOCKED(sc));
2550
2551 LIST_REMOVE(brt, brt_hash);
2552 LIST_REMOVE(brt, brt_list);
2553 sc->sc_brtcnt--;
2554 }
2555
2556 /*
2557 * bridge_rtnode_destroy:
2558 *
2559 * Destroy a bridge rtnode.
2560 */
2561 static void
2562 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2563 {
2564
2565 pool_put(&bridge_rtnode_pool, brt);
2566 }
2567
2568 #if defined(BRIDGE_IPF)
2569 extern pfil_head_t *inet_pfil_hook; /* XXX */
2570 extern pfil_head_t *inet6_pfil_hook; /* XXX */
2571
2572 /*
2573 * Send bridge packets through IPF if they are one of the types IPF can deal
2574 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without
2575 * question.)
2576 */
2577 static int
2578 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2579 {
2580 int snap, error;
2581 struct ether_header *eh1, eh2;
2582 struct llc llc1;
2583 uint16_t ether_type;
2584
2585 snap = 0;
2586 	error = -1;	/* Default error unless explicitly set to 0 below */
2587 eh1 = mtod(*mp, struct ether_header *);
2588 ether_type = ntohs(eh1->ether_type);
2589
2590 /*
2591 * Check for SNAP/LLC.
2592 */
2593 if (ether_type < ETHERMTU) {
2594 struct llc *llc2 = (struct llc *)(eh1 + 1);
2595
2596 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2597 llc2->llc_dsap == LLC_SNAP_LSAP &&
2598 llc2->llc_ssap == LLC_SNAP_LSAP &&
2599 llc2->llc_control == LLC_UI) {
2600 ether_type = htons(llc2->llc_un.type_snap.ether_type);
2601 snap = 1;
2602 }
2603 }
2604
2605 /*
2606 * If we're trying to filter bridge traffic, don't look at anything
2607 * other than IP and ARP traffic. If the filter doesn't understand
2608 * IPv6, don't allow IPv6 through the bridge either. This is lame
2609 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2610 * but of course we don't have an AppleTalk filter to begin with.
2611 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2612 * ARP traffic.)
2613 */
2614 switch (ether_type) {
2615 case ETHERTYPE_ARP:
2616 case ETHERTYPE_REVARP:
2617 return 0; /* Automatically pass */
2618 case ETHERTYPE_IP:
2619 # ifdef INET6
2620 case ETHERTYPE_IPV6:
2621 # endif /* INET6 */
2622 break;
2623 default:
2624 goto bad;
2625 }
2626
2627 /* Strip off the Ethernet header and keep a copy. */
2628 m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2629 m_adj(*mp, ETHER_HDR_LEN);
2630
2631 /* Strip off snap header, if present */
2632 if (snap) {
2633 m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2634 m_adj(*mp, sizeof(struct llc));
2635 }
2636
2637 /*
2638 * Check basic packet sanity and run IPF through pfil.
2639 */
2640 KASSERT(!cpu_intr_p());
2641 switch (ether_type)
2642 {
2643 case ETHERTYPE_IP :
2644 error = (dir == PFIL_IN) ? bridge_ip_checkbasic(mp) : 0;
2645 if (error == 0)
2646 error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2647 break;
2648 # ifdef INET6
2649 case ETHERTYPE_IPV6 :
2650 error = (dir == PFIL_IN) ? bridge_ip6_checkbasic(mp) : 0;
2651 if (error == 0)
2652 error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2653 break;
2654 # endif
2655 default :
2656 error = 0;
2657 break;
2658 }
2659
2660 if (*mp == NULL)
2661 return error;
2662 if (error != 0)
2663 goto bad;
2664
2665 error = -1;
2666
2667 /*
2668 * Finally, put everything back the way it was and return
2669 */
2670 if (snap) {
2671 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2672 if (*mp == NULL)
2673 return error;
2674 bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2675 }
2676
2677 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2678 if (*mp == NULL)
2679 return error;
2680 bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2681
2682 return 0;
2683
2684 bad:
2685 m_freem(*mp);
2686 *mp = NULL;
2687 return error;
2688 }
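#if 0
/*
 * Illustrative sketch (not compiled): bridge_ipf() is not called
 * directly; it is hung off the bridge interface's pfil head when
 * filtering on the bridge is enabled.  Assuming the pfil(9) API used by
 * this tree, a minimal registration would look roughly like this; the
 * real hook management lives in the ioctl code, not here.
 */
static int
bridge_ipf_enable_example(struct bridge_softc *sc)
{

	return pfil_add_hook(bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
	    sc->sc_if.if_pfil);
}
#endif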
2689
2690 /*
2691 * Perform basic checks on header size since
2692  * IPF assumes ip_input has already validated
2693  * the header for it.  Cut-and-pasted from ip_input.c.
2694 * Given how simple the IPv6 version is,
2695 * does the IPv4 version really need to be
2696 * this complicated?
2697 *
2698 * XXX Should we update ipstat here, or not?
2699 * XXX Right now we update ipstat but not
2700 * XXX csum_counter.
2701 */
2702 static int
2703 bridge_ip_checkbasic(struct mbuf **mp)
2704 {
2705 struct mbuf *m = *mp;
2706 struct ip *ip;
2707 int len, hlen;
2708
2709 if (*mp == NULL)
2710 return -1;
2711
2712 if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2713 if ((m = m_copyup(m, sizeof(struct ip),
2714 (max_linkhdr + 3) & ~3)) == NULL) {
2715 /* XXXJRT new stat, please */
2716 ip_statinc(IP_STAT_TOOSMALL);
2717 goto bad;
2718 }
2719 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
2720 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
2721 ip_statinc(IP_STAT_TOOSMALL);
2722 goto bad;
2723 }
2724 }
2725 ip = mtod(m, struct ip *);
2726 if (ip == NULL) goto bad;
2727
2728 if (ip->ip_v != IPVERSION) {
2729 ip_statinc(IP_STAT_BADVERS);
2730 goto bad;
2731 }
2732 hlen = ip->ip_hl << 2;
2733 if (hlen < sizeof(struct ip)) { /* minimum header length */
2734 ip_statinc(IP_STAT_BADHLEN);
2735 goto bad;
2736 }
2737 if (hlen > m->m_len) {
2738 		if ((m = m_pullup(m, hlen)) == NULL) {
2739 ip_statinc(IP_STAT_BADHLEN);
2740 goto bad;
2741 }
2742 ip = mtod(m, struct ip *);
2743 if (ip == NULL) goto bad;
2744 }
2745
2746 switch (m->m_pkthdr.csum_flags &
2747 ((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
2748 M_CSUM_IPv4_BAD)) {
2749 case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2750 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2751 goto bad;
2752
2753 case M_CSUM_IPv4:
2754 /* Checksum was okay. */
2755 /* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2756 break;
2757
2758 default:
2759 /* Must compute it ourselves. */
2760 /* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2761 if (in_cksum(m, hlen) != 0)
2762 goto bad;
2763 break;
2764 }
2765
2766 /* Retrieve the packet length. */
2767 len = ntohs(ip->ip_len);
2768
2769 /*
2770 * Check for additional length bogosity
2771 */
2772 if (len < hlen) {
2773 ip_statinc(IP_STAT_BADLEN);
2774 goto bad;
2775 }
2776
2777 /*
2778 * Check that the amount of data in the buffers
2779  * is at least as much as the IP header would have us expect.
2780 * Drop packet if shorter than we expect.
2781 */
2782 if (m->m_pkthdr.len < len) {
2783 ip_statinc(IP_STAT_TOOSHORT);
2784 goto bad;
2785 }
2786
2787 /* Checks out, proceed */
2788 *mp = m;
2789 return 0;
2790
2791 bad:
2792 *mp = m;
2793 return -1;
2794 }
2795
2796 # ifdef INET6
2797 /*
2798 * Same as above, but for IPv6.
2799 * Cut-and-pasted from ip6_input.c.
2800 * XXX Should we update ip6stat, or not?
2801 */
2802 static int
2803 bridge_ip6_checkbasic(struct mbuf **mp)
2804 {
2805 struct mbuf *m = *mp;
2806 struct ip6_hdr *ip6;
2807
2808 /*
2809 * If the IPv6 header is not aligned, slurp it up into a new
2810 * mbuf with space for link headers, in the event we forward
2811 * it. Otherwise, if it is aligned, make sure the entire base
2812 * IPv6 header is in the first mbuf of the chain.
2813 */
2814 if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
2815 struct ifnet *inifp = m->m_pkthdr.rcvif;
2816 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
2817 (max_linkhdr + 3) & ~3)) == NULL) {
2818 /* XXXJRT new stat, please */
2819 ip6_statinc(IP6_STAT_TOOSMALL);
2820 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2821 goto bad;
2822 }
2823 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
2824 struct ifnet *inifp = m->m_pkthdr.rcvif;
2825 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
2826 ip6_statinc(IP6_STAT_TOOSMALL);
2827 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2828 goto bad;
2829 }
2830 }
2831
2832 ip6 = mtod(m, struct ip6_hdr *);
2833
2834 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2835 ip6_statinc(IP6_STAT_BADVERS);
2836 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
2837 goto bad;
2838 }
2839
2840 /* Checks out, proceed */
2841 *mp = m;
2842 return 0;
2843
2844 bad:
2845 *mp = m;
2846 return -1;
2847 }
2848 # endif /* INET6 */
2849 #endif /* BRIDGE_IPF */
2850