bpf_stub.c revision 1.6.32.1
/*	$NetBSD: bpf_stub.c,v 1.6.32.1 2017/04/21 16:54:05 bouyer Exp $	*/

/*
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.6.32.1 2017/04/21 16:54:05 bouyer Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <net/bpf.h>

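/*
 * A "lagging" attach: the arguments of a bpf_attach() call made while
 * only these stubs are installed, recorded on the lagdrvs list so that
 * the attach can be replayed once the real bpf driver takes over.
 */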
struct laglist {
	struct ifnet *lag_ifp;
	u_int lag_dlt;
	u_int lag_hlen;
	struct bpf_if **lag_drvp;

	TAILQ_ENTRY(laglist) lag_entries;
};

static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);

static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
static void bpf_stub_detach(struct ifnet *);

static void bpf_stub_null(void);
static void bpf_stub_warn(void);

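/*
 * Handover state: "handover" is true while bpf_ops_handover_enter()
 * is replaying recorded attaches to the real driver; stub callers
 * that race with it wait on handovercv.
 */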
static kmutex_t handovermtx;
static kcondvar_t handovercv;
static bool handover;

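/*
 * The stub operations vector, installed by bpf_setops().  The stubs
 * take no arguments, so they are cast through (void *); bpf_stub_warn
 * backs the methods that should never run without an attached bpf_if.
 */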
struct bpf_ops bpf_ops_stub = {
	.bpf_attach =		bpf_stub_attach,
	.bpf_detach =		bpf_stub_detach,
	.bpf_change_type =	(void *)bpf_stub_null,

	.bpf_tap =		(void *)bpf_stub_warn,
	.bpf_mtap =		(void *)bpf_stub_warn,
	.bpf_mtap2 =		(void *)bpf_stub_warn,
	.bpf_mtap_af =		(void *)bpf_stub_warn,
	.bpf_mtap_sl_in =	(void *)bpf_stub_warn,
	.bpf_mtap_sl_out =	(void *)bpf_stub_warn,

	.bpf_mtap_softint_init =	(void *)bpf_stub_null,
	.bpf_mtap_softint =		(void *)bpf_stub_warn,
};
struct bpf_ops *bpf_ops;

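/*
 * Stub attach: if no handover is in progress, record the attach on
 * lagdrvs for later replay and leave *drvp NULL.  If we had to wait
 * out a handover, the real driver is now in charge, so forward the
 * call to it instead.
 */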
static void
bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
{
	struct laglist *lag;
	bool storeattach = true;

	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
	lag->lag_ifp = ifp;
	lag->lag_dlt = dlt;
	lag->lag_hlen = hlen;
	lag->lag_drvp = drvp;

	mutex_enter(&handovermtx);
	/*
	 * If handover is in progress, wait for it to finish and complete
	 * attach after that.  Otherwise record ourselves.
	 */
	while (handover) {
		storeattach = false;
		cv_wait(&handovercv, &handovermtx);
	}

	if (storeattach == false) {
		mutex_exit(&handovermtx);
		kmem_free(lag, sizeof(*lag));
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
	} else {
		*drvp = NULL;
		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
	}
}

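/*
 * Stub detach: if no handover happened, simply unhook and free any
 * attaches recorded for this interface.  If we had to wait out a
 * handover, our recorded attaches were replayed to the real driver,
 * so forward the detach to it.
 */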
static void
bpf_stub_detach(struct ifnet *ifp)
{
	TAILQ_HEAD(, laglist) rmlist;
	struct laglist *lag, *lag_next;
	bool didhand;

	TAILQ_INIT(&rmlist);

	didhand = false;
	mutex_enter(&handovermtx);
	while (handover) {
		didhand = true;
		cv_wait(&handovercv, &handovermtx);
	}

	if (didhand == false) {
		/* atomically remove all */
		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
			lag_next = TAILQ_NEXT(lag, lag_entries);
			if (lag->lag_ifp == ifp) {
				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
			}
		}
		mutex_exit(&handovermtx);
		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
			TAILQ_REMOVE(&rmlist, lag, lag_entries);
			kmem_free(lag, sizeof(*lag));
		}
	} else {
		mutex_exit(&handovermtx);
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_detach(ifp);
	}
}

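/* Stub for methods that may safely be ignored until the driver loads. */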
static void
bpf_stub_null(void)
{

}

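/*
 * Stub for methods that should never be reached without an attached
 * bpf_if: panic on DEBUG kernels, otherwise warn on DIAGNOSTIC ones.
 */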
static void
bpf_stub_warn(void)
{

#ifdef DEBUG
	panic("bpf method called without attached bpf_if");
#endif
#ifdef DIAGNOSTIC
	printf("bpf method called without attached bpf_if\n");
#endif
}

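/*
 * Install the stub operations vector; this must run before any
 * driver calls bpf_attach().
 */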
void
bpf_setops(void)
{

	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&handovercv, "bpfops");
	bpf_ops = &bpf_ops_stub;
}

/*
 * Party's over, prepare for handover.
 * It needs to happen *before* bpf_ops is set to make the switch
 * atomic to callers (see also the stub implementations, which wait
 * if called during handover).  The likelihood of seeing a full
 * attach-detach cycle *during* handover is close to astronomical,
 * but handle it anyway since it's relatively easy.
 */
void
bpf_ops_handover_enter(struct bpf_ops *newops)
{
	struct laglist *lag;

	mutex_enter(&handovermtx);
	handover = true;

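	/*
	 * Replay the recorded attaches.  The mutex is dropped around
	 * each bpf_attach() call, which may sleep; handover stays true,
	 * so racing stub callers keep waiting, and TAILQ_FIRST() is
	 * re-evaluated once the lock is retaken.
	 */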
	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
		    lag->lag_hlen, lag->lag_drvp);
		kmem_free(lag, sizeof(*lag));
		mutex_enter(&handovermtx);
	}
	mutex_exit(&handovermtx);
}

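/*
 * A sketch of the expected takeover sequence on the driver side
 * (which lives elsewhere; "bpf_ops_real" is illustrative), following
 * the ordering constraint documented above:
 *
 *	bpf_ops_handover_enter(&bpf_ops_real);	// replay lagged attaches
 *	bpf_ops = &bpf_ops_real;		// install the real vector
 *	bpf_ops_handover_exit();		// wake any stub waiters
 */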
/* handover done */
void
bpf_ops_handover_exit(void)
{

	mutex_enter(&handovermtx);
	handover = false;
	cv_broadcast(&handovercv);
	mutex_exit(&handovermtx);
}