/*	$NetBSD: bpf_stub.c,v 1.6.28.1 2017/03/20 06:57:49 pgoyette Exp $	*/

/*
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.6.28.1 2017/03/20 06:57:49 pgoyette Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>

#include <net/bpf.h>

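/*
 * Record of a bpf_attach() request made while only this stub was
 * active.  Queued entries are replayed against the real bpf driver
 * when it takes over (see bpf_ops_handover_enter() below).
 */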
struct laglist {
	struct ifnet *lag_ifp;
	u_int lag_dlt;
	u_int lag_hlen;
	struct bpf_if **lag_drvp;

	TAILQ_ENTRY(laglist) lag_entries;
};

static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);

static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
static void bpf_stub_detach(struct ifnet *);

static void bpf_stub_null(void);
static void bpf_stub_warn(void);

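/*
 * Synchronization for the stub-to-driver handover: "handover" is true
 * while bpf_ops_handover_enter() is replaying queued attaches, and
 * attach/detach callers wait on handovercv until it completes.
 */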
static kmutex_t handovermtx;
static kcondvar_t handovercv;
static bool handover;

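/*
 * Stub operations vector, in effect until the real bpf driver installs
 * its own: attach/detach are queued or forwarded, the remaining hooks
 * either do nothing or warn that no bpf_if is attached.
 */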
struct bpf_ops bpf_ops_stub = {
	.bpf_attach =		bpf_stub_attach,
	.bpf_detach =		bpf_stub_detach,
	.bpf_change_type =	(void *)bpf_stub_null,

	.bpf_tap = 		(void *)bpf_stub_warn,
	.bpf_mtap = 		(void *)bpf_stub_warn,
	.bpf_mtap2 = 		(void *)bpf_stub_warn,
	.bpf_mtap_af = 		(void *)bpf_stub_warn,
	.bpf_mtap_sl_in = 	(void *)bpf_stub_warn,
	.bpf_mtap_sl_out =	(void *)bpf_stub_warn,

	.bpf_mtap_softint_init =	(void *)bpf_stub_null,
	.bpf_mtap_softint =		(void *)bpf_stub_warn,
};
struct bpf_ops *bpf_ops;

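/*
 * Stub attach: remember the request so it can be replayed when the
 * real driver arrives, or forward it directly if a handover completed
 * while we were waiting.
 */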
static void
bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
{
	struct laglist *lag;
	bool storeattach = true;

	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
	lag->lag_ifp = ifp;
	lag->lag_dlt = dlt;
	lag->lag_hlen = hlen;
	lag->lag_drvp = drvp;

	mutex_enter(&handovermtx);
	/*
	 * If handover is in progress, wait for it to finish and complete
	 * attach after that.  Otherwise record ourselves.
	 */
	while (handover) {
		storeattach = false;
		cv_wait(&handovercv, &handovermtx);
	}

	if (storeattach == false) {
		mutex_exit(&handovermtx);
		kmem_free(lag, sizeof(*lag));
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
	} else {
		*drvp = NULL;
		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
	}
}

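/*
 * Stub detach: drop any queued attach records for the interface, or
 * forward the detach to the real driver if a handover happened while
 * we were waiting.
 */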
static void
bpf_stub_detach(struct ifnet *ifp)
{
	TAILQ_HEAD(, laglist) rmlist;
	struct laglist *lag, *lag_next;
	bool didhand;

	TAILQ_INIT(&rmlist);

	didhand = false;
	mutex_enter(&handovermtx);
	while (handover) {
		didhand = true;
		cv_wait(&handovercv, &handovermtx);
	}

	if (didhand == false) {
		/* atomically remove all */
		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
			lag_next = TAILQ_NEXT(lag, lag_entries);
			if (lag->lag_ifp == ifp) {
				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
			}
		}
		mutex_exit(&handovermtx);
		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
			TAILQ_REMOVE(&rmlist, lag, lag_entries);
			kmem_free(lag, sizeof(*lag));
		}
	} else {
		mutex_exit(&handovermtx);
		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
		bpf_ops->bpf_detach(ifp);
	}
}

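/* Do-nothing stub for hooks that are harmless to ignore. */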
static void
bpf_stub_null(void)
{

}

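/*
 * Stub for tap-style hooks that should never run without an attached
 * bpf_if: panic under DEBUG, otherwise just complain under DIAGNOSTIC.
 */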
static void
bpf_stub_warn(void)
{

#ifdef DEBUG
	panic("bpf method called without attached bpf_if");
#endif
#ifdef DIAGNOSTIC
	printf("bpf method called without attached bpf_if\n");
#endif
}

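/*
 * Early bootstrap: initialize the handover lock/condvar and point
 * bpf_ops at the stub vector until the real driver registers.
 */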
void
bpf_setops(void)
{

	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&handovercv, "bpfops");
	bpf_ops = &bpf_ops_stub;
}

/*
 * Party's over, prepare for handover.
 * It needs to happen *before* bpf_ops is set to make it atomic
 * to callers (see also stub implementations, which wait if
 * called during handover).  The likelihood of seeing a full
 * attach-detach *during* handover comes close to astronomical,
 * but handle it anyway since it's relatively easy.
 */
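/*
 * A minimal sketch of how the real driver is expected to take over,
 * assuming it provides its own ops vector (the names here are
 * illustrative, not actual symbols from net/bpf.c):
 *
 *	bpf_ops_handover_enter(&bpf_ops_real);
 *	atomic_swap_ptr(&bpf_ops, &bpf_ops_real);
 *	bpf_ops_handover_exit();
 */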
void
bpf_ops_handover_enter(struct bpf_ops *newops)
{
	struct laglist *lag;

	mutex_enter(&handovermtx);
	handover = true;

	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
		mutex_exit(&handovermtx);
		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
		    lag->lag_hlen, lag->lag_drvp);
		kmem_free(lag, sizeof(*lag));
		mutex_enter(&handovermtx);
	}
	mutex_exit(&handovermtx);
}

/* hangover done */
void
bpf_ops_handover_exit(void)
{

	mutex_enter(&handovermtx);
	handover = false;
	cv_broadcast(&handovercv);
	mutex_exit(&handovermtx);
}