/*	$NetBSD: bpf.c,v 1.230 2019/09/12 07:38:19 maxv Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 * static char rcsid[] =
 * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.230 2019/09/12 07:38:19 maxv Exp $");

#if defined(_KERNEL_OPT)
#include "opt_bpf.h"
#include "sl.h"
#include "strip.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/percpu.h>
#include <sys/pserialize.h>
#include <sys/lwp.h>

#include <net/if.h>
#include <net/slip.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/bpfjit.h>

#include <net/if_arc.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>


#include <compat/sys/sockio.h>

#ifndef BPF_BUFSIZE
/*
 * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet
 * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k).
 */
# define BPF_BUFSIZE 32768
#endif

#define PRINET  26			/* interruptible */

/*
 * The default read buffer size, and the limit for BIOCSBLEN, are sysctl'able.
 * XXX the default values should be computed dynamically based
 * on available memory size and available mbuf clusters.
 */
static int bpf_bufsize = BPF_BUFSIZE;
static int bpf_maxbufsize = BPF_DFLTBUFSIZE;	/* XXX set dynamically, see above */
static bool bpf_jit = false;

struct bpfjit_ops bpfjit_module_ops = {
	.bj_generate_code = NULL,
	.bj_free_code = NULL
};

/*
 * Global BPF statistics returned by net.bpf.stats sysctl.
 */
static struct percpu	*bpf_gstats_percpu; /* struct bpf_stat */

#define BPF_STATINC(id)					\
	{						\
		struct bpf_stat *__stats =		\
		    percpu_getref(bpf_gstats_percpu);	\
		__stats->bs_##id++;			\
		percpu_putref(bpf_gstats_percpu);	\
	}

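/*
 * Usage sketch (illustrative only): the tap path bumps the per-CPU
 * receive counter with
 *
 *	BPF_STATINC(recv);
 *
 * which expands to a percpu_getref()/percpu_putref() pair around an
 * increment of the bs_recv field.
 */
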
/*
 * Locking notes:
 * - bpf_mtx (adaptive mutex) protects:
 *   - Global lists: bpf_iflist and bpf_dlist
 *   - struct bpf_if
 *   - bpf_close
 *   - bpf_psz (pserialize)
 * - struct bpf_d has two mutexes:
 *   - bd_buf_mtx (spin mutex) protects the buffers that can be accessed
 *     on packet tapping
 *   - bd_mtx (adaptive mutex) protects member variables other than the buffers
 * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx
 * - The struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is
 *   never freed out from under them, because struct bpf_d is only freed in
 *   bpf_close, and bpf_close is never called while bpf_read or bpf_write
 *   is executing
 * - A filter assigned to a bpf_d can be replaced with another filter while
 *   packets are being tapped, so the replacement needs to be done atomically
 * - struct bpf_d is iterated on bpf_dlist with psz
 * - struct bpf_if is iterated on bpf_iflist with psz or psref
 */
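/*
 * For example (a sketch mirroring bpf_setf and BIOCSBLEN below), code
 * that needs several of these locks takes them in the documented order:
 *
 *	mutex_enter(&bpf_mtx);
 *	mutex_enter(d->bd_mtx);
 *	mutex_enter(d->bd_buf_mtx);
 *	...
 *	mutex_exit(d->bd_buf_mtx);
 *	mutex_exit(d->bd_mtx);
 *	mutex_exit(&bpf_mtx);
 */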
/*
 * Use a mutex to avoid a race condition between gathering the stats/peers
 * and opening/closing the device.
 */
static kmutex_t bpf_mtx;

static struct psref_class	*bpf_psref_class __read_mostly;
static pserialize_t		bpf_psz;

static inline void
bpf_if_acquire(struct bpf_if *bp, struct psref *psref)
{

	psref_acquire(psref, &bp->bif_psref, bpf_psref_class);
}

static inline void
bpf_if_release(struct bpf_if *bp, struct psref *psref)
{

	psref_release(psref, &bp->bif_psref, bpf_psref_class);
}

/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 *  bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct pslist_head bpf_iflist;
static struct pslist_head bpf_dlist;

/* Macros for bpf_d on bpf_dlist */
#define BPF_DLIST_WRITER_INSERT_HEAD(__d)				\
	PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry)
#define BPF_DLIST_READER_FOREACH(__d)					\
	PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d,		\
	                      bd_bpf_dlist_entry)
#define BPF_DLIST_WRITER_FOREACH(__d)					\
	PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d,		\
	                      bd_bpf_dlist_entry)
#define BPF_DLIST_ENTRY_INIT(__d)					\
	PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry)
#define BPF_DLIST_WRITER_REMOVE(__d)					\
	PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry)
#define BPF_DLIST_ENTRY_DESTROY(__d)					\
	PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry)

/* Macros for bpf_if on bpf_iflist */
#define BPF_IFLIST_WRITER_INSERT_HEAD(__bp)				\
	PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry)
#define BPF_IFLIST_READER_FOREACH(__bp)					\
	PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if,	\
	                      bif_iflist_entry)
#define BPF_IFLIST_WRITER_FOREACH(__bp)					\
	PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if,	\
	                      bif_iflist_entry)
#define BPF_IFLIST_WRITER_REMOVE(__bp)					\
	PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry)
#define BPF_IFLIST_ENTRY_INIT(__bp)					\
	PSLIST_ENTRY_INIT((__bp), bif_iflist_entry)
#define BPF_IFLIST_ENTRY_DESTROY(__bp)					\
	PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry)

/* Macros for bpf_d on bpf_if#bif_dlist_pslist */
#define BPFIF_DLIST_READER_FOREACH(__d, __bp)				\
	PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \
	                      bd_bif_dlist_entry)
#define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d)			\
	PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d),	\
	                          bd_bif_dlist_entry)
#define BPFIF_DLIST_WRITER_REMOVE(__d)					\
	PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry)
#define BPFIF_DLIST_ENTRY_INIT(__d)					\
	PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry)
#define	BPFIF_DLIST_READER_EMPTY(__bp)					\
	(PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d,	\
	                     bd_bif_dlist_entry) == NULL)
#define	BPFIF_DLIST_WRITER_EMPTY(__bp)					\
	(PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d,	\
	                     bd_bif_dlist_entry) == NULL)
#define BPFIF_DLIST_ENTRY_DESTROY(__d)					\
	PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry)

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_deliver(struct bpf_if *,
		            void *(*cpfn)(void *, const void *, size_t),
		            void *, u_int, u_int, const u_int);
static void	bpf_freed(struct bpf_d *);
static void	bpf_free_filter(struct bpf_filter *);
static void	bpf_ifname(struct ifnet *, struct ifreq *);
static void	*bpf_mcpy(void *, const void *, size_t);
static int	bpf_movein(struct uio *, int, uint64_t,
			        struct mbuf **, struct sockaddr *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *);
static void	bpf_timed_out(void *);
static inline void
		bpf_wakeup(struct bpf_d *);
static int	bpf_hdrlen(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
    void *(*)(void *, const void *, size_t), struct timespec *);
static void	reset_d(struct bpf_d *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);

static int	bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t,
    int);
static int	bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t,
    int);
static int	bpf_ioctl(struct file *, u_long, void *);
static int	bpf_poll(struct file *, int);
static int	bpf_stat(struct file *, struct stat *);
static int	bpf_close(struct file *);
static int	bpf_kqfilter(struct file *, struct knote *);

static const struct fileops bpf_fileops = {
	.fo_name = "bpf",
	.fo_read = bpf_read,
	.fo_write = bpf_write,
	.fo_ioctl = bpf_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = bpf_poll,
	.fo_stat = bpf_stat,
	.fo_close = bpf_close,
	.fo_kqfilter = bpf_kqfilter,
	.fo_restart = fnullop_restart,
};

dev_type_open(bpfopen);

const struct cdevsw bpf_cdevsw = {
	.d_open = bpfopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

bpfjit_func_t
bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size)
{

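	/*
	 * The consumer barrier pairs with a producer barrier that the
	 * bpfjit module is expected to issue after it publishes
	 * bpfjit_module_ops (an assumption; the module code lives
	 * outside this file).
	 */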
	membar_consumer();
	if (bpfjit_module_ops.bj_generate_code != NULL) {
		return bpfjit_module_ops.bj_generate_code(bc, code, size);
	}
	return NULL;
}

void
bpf_jit_freecode(bpfjit_func_t jcode)
{
	KASSERT(bpfjit_module_ops.bj_free_code != NULL);
	bpfjit_module_ops.bj_free_code(jcode);
}

static int
bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp,
	   struct sockaddr *sockp)
{
	struct mbuf *m;
	int error;
	size_t len;
	size_t hlen;
	size_t align;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		align = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		/* 6(dst)+6(src)+2(type) */
		hlen = sizeof(struct ether_header);
		align = 2;
		break;

	case DLT_ARCNET:
		sockp->sa_family = AF_UNSPEC;
		hlen = ARC_HDRLEN;
		align = 5;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_LINK;
		/* XXX 4(FORMAC)+6(dst)+6(src) */
		hlen = 16;
		align = 0;
		break;

	case DLT_ECONET:
		sockp->sa_family = AF_UNSPEC;
		hlen = 6;
		align = 2;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * If there aren't enough bytes for a link level header or the
	 * packet length exceeds the interface mtu, return an error.
	 */
	if (len - hlen > mtu)
		return (EMSGSIZE);

	/*
	 * XXX Avoid complicated buffer chaining ---
	 * bail if it won't fit in a single mbuf.
	 * (Take into account possible alignment bytes)
	 */
	if (len + align > MCLBYTES)
		return (EIO);

	m = m_gethdr(M_WAIT, MT_DATA);
	m_reset_rcvif(m);
	m->m_pkthdr.len = (int)(len - hlen);
	if (len + align > MHLEN) {
		m_clget(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}

	/* Ensure the data is properly aligned */
	if (align > 0) {
		m->m_data += align;
		m->m_len -= (int)align;
	}

	error = uiomove(mtod(m, void *), len, uio);
	if (error)
		goto bad;
	if (hlen != 0) {
		memcpy(sockp->sa_data, mtod(m, void *), hlen);
		m->m_data += hlen; /* XXX */
		len -= hlen;
	}
	m->m_len = (int)len;
	*mp = m;
	return (0);

bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{

	KASSERT(mutex_owned(&bpf_mtx));
	KASSERT(mutex_owned(d->bd_mtx));
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d);

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_if *bp;

	KASSERT(mutex_owned(&bpf_mtx));
	KASSERT(mutex_owned(d->bd_mtx));

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error __diagused;

		d->bd_promisc = 0;
		/*
		 * Take the device out of promiscuous mode.  Since we
		 * were able to enter promiscuous mode, we should be
		 * able to turn it off.  But we can get an error if
		 * the interface was configured down, so just report
		 * an unexpected error instead of panicking.
		 */
		KERNEL_LOCK_UNLESS_NET_MPSAFE();
		error = ifpromisc(bp->bif_ifp, 0);
		KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#ifdef DIAGNOSTIC
		if (error)
			printf("%s: ifpromisc failed: %d\n", __func__, error);
#endif
	}

	/* Remove d from the interface's descriptor list. */
	BPFIF_DLIST_WRITER_REMOVE(d);

	pserialize_perform(bpf_psz);

	if (BPFIF_DLIST_WRITER_EMPTY(bp)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = NULL;
	}
	d->bd_bif = NULL;
}

static void
bpf_init(void)
{

	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
	bpf_psz = pserialize_create();
	bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET);

	PSLIST_INIT(&bpf_iflist);
	PSLIST_INIT(&bpf_dlist);

	bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat));

	return;
}

/*
 * bpfilterattach() is called at boot time.  We don't need to do anything
 * here, since any initialization will happen as part of module init code.
 */
/* ARGSUSED */
void
bpfilterattach(int n)
{

}

/*
 * Open the bpf device.  Each open clones a new descriptor.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct bpf_d *d;
	struct file *fp;
	int error, fd;

	/* falloc() will fill in the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	d = kmem_zalloc(sizeof(*d), KM_SLEEP);
	d->bd_bufsize = bpf_bufsize;
	d->bd_direction = BPF_D_INOUT;
	d->bd_feedback = 0;
	d->bd_pid = l->l_proc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
#endif
	getnanotime(&d->bd_btime);
	d->bd_atime = d->bd_mtime = d->bd_btime;
	callout_init(&d->bd_callout, CALLOUT_MPSAFE);
	selinit(&d->bd_sel);
	d->bd_jitcode = NULL;
	d->bd_filter = NULL;
	BPF_DLIST_ENTRY_INIT(d);
	BPFIF_DLIST_ENTRY_INIT(d);
	d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	cv_init(&d->bd_cv, "bpf");

	mutex_enter(&bpf_mtx);
	BPF_DLIST_WRITER_INSERT_HEAD(d);
	mutex_exit(&bpf_mtx);

	return fd_clone(fp, fd, flag, &bpf_fileops, d);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpf_close(struct file *fp)
{
	struct bpf_d *d;

	mutex_enter(&bpf_mtx);

	if ((d = fp->f_bpf) == NULL) {
		mutex_exit(&bpf_mtx);
		return 0;
	}

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	d->bd_pid = curproc->p_pid;

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING)
		callout_halt(&d->bd_callout, d->bd_mtx);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif)
		bpf_detachd(d);
	mutex_exit(d->bd_mtx);

	BPF_DLIST_WRITER_REMOVE(d);

	pserialize_perform(bpf_psz);
	mutex_exit(&bpf_mtx);

	BPFIF_DLIST_ENTRY_DESTROY(d);
	BPF_DLIST_ENTRY_DESTROY(d);
	fp->f_bpf = NULL;
	bpf_freed(d);
	callout_destroy(&d->bd_callout);
	seldestroy(&d->bd_sel);
	mutex_obj_free(d->bd_mtx);
	mutex_obj_free(d->bd_buf_mtx);
	cv_destroy(&d->bd_cv);

	kmem_free(d, sizeof(*d));

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
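/*
 * Illustrative note: the three buffers cycle as
 *
 *	sbuf (store, filling)  ->  hbuf (hold, handed to read(2))
 *	fbuf (free)            ->  sbuf
 *
 * so a rotation is only possible while a free buffer exists, i.e.
 * while no reader is still holding the old hold buffer.
 */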
/*
 *  bpfread - read next chunk of packets from buffers
 */
static int
bpf_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	int timed_out;
	int error;

	getnanotime(&d->bd_atime);
	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING)
		callout_halt(&d->bd_callout, d->bd_mtx);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	mutex_exit(d->bd_mtx);
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	mutex_enter(d->bd_buf_mtx);
	while (d->bd_hbuf == NULL) {
		if (fp->f_flag & FNONBLOCK) {
			if (d->bd_slen == 0) {
				error = EWOULDBLOCK;
				goto out;
			}
			ROTATE_BUFFERS(d);
			break;
		}

		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets arrived either since the
			 * previous read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout);

		if (error == EINTR || error == ERESTART)
			goto out;

		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				error = 0;
				goto out;
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (error != 0)
			goto out;
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	mutex_exit(d->bd_buf_mtx);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	mutex_enter(d->bd_buf_mtx);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
out:
	mutex_exit(d->bd_buf_mtx);
	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(struct bpf_d *d)
{

	mutex_enter(d->bd_buf_mtx);
	cv_broadcast(&d->bd_cv);
	mutex_exit(d->bd_buf_mtx);

	if (d->bd_async)
		fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL);
	selnotify(&d->bd_sel, 0, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = arg;

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	mutex_exit(d->bd_mtx);
}


static int
bpf_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	struct bpf_if *bp;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	int error;
	static struct sockaddr_storage dst;
	struct psref psref;
	int bound;

	m = NULL;	/* XXX gcc */

	bound = curlwp_bind();
	mutex_enter(d->bd_mtx);
	bp = d->bd_bif;
	if (bp == NULL) {
		mutex_exit(d->bd_mtx);
		error = ENXIO;
		goto out_bindx;
	}
	bpf_if_acquire(bp, &psref);
	mutex_exit(d->bd_mtx);

	getnanotime(&d->bd_mtime);

	ifp = bp->bif_ifp;
	if (if_is_deactivated(ifp)) {
		error = ENXIO;
		goto out;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}

	error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m,
		(struct sockaddr *) &dst);
	if (error)
		goto out;

	if (m->m_pkthdr.len > ifp->if_mtu) {
		m_freem(m);
		error = EMSGSIZE;
		goto out;
	}

	if (d->bd_hdrcmplt)
		dst.ss_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, 0, M_COPYALL, M_NOWAIT);
		if (mc != NULL)
			m_set_rcvif(mc, ifp);
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (1 /*d->bd_direction == BPF_D_INOUT*/)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	error = if_output_lock(ifp, ifp, m, (struct sockaddr *) &dst, NULL);

	if (mc != NULL) {
		if (error == 0) {
			int s = splsoftnet();
			KERNEL_LOCK_UNLESS_IFP_MPSAFE(ifp);
			ifp->_if_input(ifp, mc);
			KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(ifp);
			splx(s);
		} else
			m_freem(mc);
	}
	/*
	 * The driver frees the mbuf.
	 */
out:
	bpf_if_release(bp, &psref);
out_bindx:
	curlwp_bindx(bound);
	return error;
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.
 */
static void
reset_d(struct bpf_d *d)
{

	KASSERT(mutex_owned(d->bd_mtx));

	mutex_enter(d->bd_buf_mtx);
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_ccount = 0;
	mutex_exit(d->bd_buf_mtx);
}

/*
 *  FIONREAD		Check for read packet available.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 *  BIOCSFEEDBACK	Set packet feedback mode.
 *  BIOCGFEEDBACK	Get packet feedback mode.
 *  BIOCGDIRECTION	Get packet direction flag
 *  BIOCSDIRECTION	Set packet direction flag
 */
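/*
 * Userland usage sketch (illustrative only, not part of this file):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "wm0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	// attach to an interface
 *	u_int imm = 1;
 *	ioctl(fd, BIOCIMMEDIATE, &imm);	// wake readers per packet
 *
 * "wm0" is just an example interface name.
 */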
/* ARGSUSED */
static int
bpf_ioctl(struct file *fp, u_long cmd, void *addr)
{
	struct bpf_d *d = fp->f_bpf;
	int error = 0;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	d->bd_pid = curproc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
	else
		d->bd_compat32 = 0;
#endif

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING)
		callout_halt(&d->bd_callout, d->bd_mtx);
	d->bd_state = BPF_IDLE;
	mutex_exit(d->bd_mtx);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			mutex_enter(d->bd_buf_mtx);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			mutex_exit(d->bd_buf_mtx);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		/*
		 * Refuse to change the buffer length if buffers are
		 * already allocated.
		 */
		mutex_enter(d->bd_mtx);
		mutex_enter(d->bd_buf_mtx);
		if (d->bd_bif != NULL || d->bd_sbuf != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		mutex_exit(d->bd_buf_mtx);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		mutex_enter(d->bd_mtx);
		reset_d(d);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL) {
			mutex_exit(d->bd_mtx);
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			KERNEL_LOCK_UNLESS_NET_MPSAFE();
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
			if (error == 0)
				d->bd_promisc = 1;
		}
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Get a list of supported device parameters.
	 */
	case BIOCGDLTLIST:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, addr);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Set device parameters.
	 */
	case BIOCSDLT:
		mutex_enter(&bpf_mtx);
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		mutex_exit(d->bd_mtx);
		mutex_exit(&bpf_mtx);
		break;

	/*
	 * Get interface name.
	 */
#ifdef OBIOCGETIF
	case OBIOCGETIF:
#endif
	case BIOCGETIF:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, addr);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Set interface.
	 */
#ifdef OBIOCSETIF
	case OBIOCSETIF:
#endif
	case BIOCSETIF:
		mutex_enter(&bpf_mtx);
		error = bpf_setif(d, addr);
		mutex_exit(&bpf_mtx);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}
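		/*
		 * For example, assuming hz = 100 (so tick = 10000 us),
		 * a timeout of 1.5 s becomes
		 * 1 * 100 + 500000 / 10000 = 150 ticks.
		 */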

#ifdef BIOCGORTIMEOUT
	/*
	 * Get read timeout.
	 */
	case BIOCGORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}
#endif

#ifdef BIOCSORTIMEOUT
	/*
	 * Set read timeout.
	 */
	case BIOCSORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}
#endif

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}
	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			bs->bs_capt = d->bd_ccount;
			break;
		}

	case BIOCGSTATSOLD:
		{
			struct bpf_stat_old *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	/*
	 * Set "feed packets from bpf back to input" mode
	 */
	case BIOCSFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	/*
	 * Get "feed packets from bpf back to input" mode
	 */
	case BIOCGFEEDBACK:
		*(u_int *)addr = d->bd_feedback;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		/*
		 * No need to do anything special here: bpf_read()
		 * checks FNONBLOCK on the file to decide whether or
		 * not to block the read.
		 */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		mutex_enter(d->bd_mtx);
		d->bd_async = *(int *)addr;
		mutex_exit(d->bd_mtx);
		break;

	case TIOCSPGRP:		/* Process or group to send signals to */
	case FIOSETOWN:
		error = fsetown(&d->bd_pgid, cmd, addr);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(d->bd_pgid, cmd, addr);
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp)
{
	struct bpf_insn *fcode;
	bpfjit_func_t jcode;
	size_t flen, size = 0;
	struct bpf_filter *oldf, *newf;

	jcode = NULL;
	flen = fp->bf_len;

	if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) {
		return EINVAL;
	}

	if (flen) {
		/*
		 * Allocate the buffer, copy the byte-code from
		 * userspace and validate it.
		 */
		size = flen * sizeof(*fp->bf_insns);
		fcode = kmem_alloc(size, KM_SLEEP);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    !bpf_validate(fcode, (int)flen)) {
			kmem_free(fcode, size);
			return EINVAL;
		}
		membar_consumer();
		if (bpf_jit)
			jcode = bpf_jit_generate(NULL, fcode, flen);
	} else {
		fcode = NULL;
	}

	newf = kmem_alloc(sizeof(*newf), KM_SLEEP);
	newf->bf_insn = fcode;
	newf->bf_size = size;
	newf->bf_jitcode = jcode;
	d->bd_jitcode = jcode; /* XXX just for kvm(3) users */

	/* Need to hold bpf_mtx for pserialize_perform */
	mutex_enter(&bpf_mtx);
	mutex_enter(d->bd_mtx);
	oldf = d->bd_filter;
	d->bd_filter = newf;
	membar_producer();
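	/*
	 * The producer barrier above pairs with the
	 * membar_datadep_consumer() in bpf_deliver() when readers
	 * fetch bd_filter.
	 */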
	reset_d(d);
	pserialize_perform(bpf_psz);
	mutex_exit(d->bd_mtx);
	mutex_exit(&bpf_mtx);

	if (oldf != NULL)
		bpf_free_filter(oldf);

	return 0;
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	char *cp;
	int unit_seen, i, error;

	KASSERT(mutex_owned(&bpf_mtx));
	/*
	 * Make sure the provided name has a unit number, and default
	 * it to '0' if not specified.
	 * XXX This is ugly ... do this differently?
	 */
	unit_seen = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
	while (*cp++)
		if (*cp >= '0' && *cp <= '9')
			unit_seen = 1;
	if (!unit_seen) {
		/* Make sure to leave room for the '\0'. */
		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
			if ((ifr->ifr_name[i] >= 'a' &&
			     ifr->ifr_name[i] <= 'z') ||
			    (ifr->ifr_name[i] >= 'A' &&
			     ifr->ifr_name[i] <= 'Z'))
				continue;
			ifr->ifr_name[i] = '0';
		}
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	BPF_IFLIST_WRITER_FOREACH(bp) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL ||
		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to the requested interface,
		 * just flush the buffer.
		 */
		/*
		 * bpf_allocbufs is called only here. bpf_mtx ensures that
		 * no race condition happens on d->bd_sbuf.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		mutex_enter(d->bd_mtx);
		if (bp != d->bd_bif) {
			if (d->bd_bif) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
				BPFIF_DLIST_ENTRY_INIT(d);
			}

			bpf_attachd(d, bp);
		}
		reset_d(d);
		mutex_exit(d->bd_mtx);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Copy the interface name to the ifreq.
 */
static void
bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
{
	memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
}

static int
bpf_stat(struct file *fp, struct stat *st)
{
	struct bpf_d *d = fp->f_bpf;

	(void)memset(st, 0, sizeof(*st));
	mutex_enter(d->bd_mtx);
	st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid);
	st->st_atimespec = d->bd_atime;
	st->st_mtimespec = d->bd_mtime;
	st->st_ctimespec = st->st_birthtimespec = d->bd_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	st->st_mode = S_IFCHR;
	mutex_exit(d->bd_mtx);
	return 0;
}

/*
 * Support for poll() system call
 *
 * Return true iff the specific operation will not block indefinitely - with
 * the assumption that it is safe to positively acknowledge a request for the
 * ability to write to the BPF device.
 * Otherwise, return false but make a note that a selnotify() must be done.
 */
static int
bpf_poll(struct file *fp, int events)
{
	struct bpf_d *d = fp->f_bpf;
	int revents;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	mutex_enter(&bpf_mtx);
	d->bd_pid = curproc->p_pid;

	revents = events & (POLLOUT | POLLWRNORM);
	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 */
		mutex_enter(d->bd_mtx);
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		     d->bd_slen != 0)) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &d->bd_sel);
			/* Start the read timeout if necessary */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
					      bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
		mutex_exit(d->bd_mtx);
	}

	mutex_exit(&bpf_mtx);
	return (revents);
}

static void
filt_bpfrdetach(struct knote *kn)
{
	struct bpf_d *d = kn->kn_hook;

	mutex_enter(d->bd_buf_mtx);
	SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(d->bd_buf_mtx);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = kn->kn_hook;
	int rv;

	mutex_enter(d->bd_buf_mtx);
	kn->kn_data = d->bd_hlen;
	if (d->bd_immediate)
		kn->kn_data += d->bd_slen;
	rv = (kn->kn_data > 0);
	mutex_exit(d->bd_buf_mtx);
	return rv;
}

static const struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_bpfrdetach,
	.f_event = filt_bpfread,
};

static int
bpf_kqfilter(struct file *fp, struct knote *kn)
{
	struct bpf_d *d = fp->f_bpf;
	struct klist *klist;

	mutex_enter(d->bd_buf_mtx);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &d->bd_sel.sel_klist;
		kn->kn_fop = &bpfread_filtops;
		break;

	default:
		mutex_exit(d->bd_buf_mtx);
		return (EINVAL);
	}

	kn->kn_hook = d;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(d->bd_buf_mtx);

	return (0);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void *
bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcpy");
		count = uimin(m->m_len, len);
		memcpy(dst, mtod(m, const void *), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
	return dst_arg;
}

/*
 * Dispatch a packet to all the listeners on interface bp.
 *
 * pkt       pointer to the packet, either a data buffer or an mbuf chain
 * buflen    buffer length, if pkt is a data buffer
 * cpfn      a function that can copy pkt into the listener's buffer
 * pktlen    length of the packet
 * direction BPF_D_IN or BPF_D_OUT
 */
static inline void
bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
    void *pkt, u_int pktlen, u_int buflen, const u_int direction)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = (const uint8_t *)pkt,
		.wirelen = pktlen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bool gottime = false;
	struct timespec ts;
	struct bpf_d *d;
	int s;

	KASSERT(!cpu_intr_p());

	/*
	 * Note that the IPL does not have to be raised at this point.
	 * The only problem that could arise here would be if two
	 * different interfaces shared any data, which is not the case.
	 */
	s = pserialize_read_enter();
	BPFIF_DLIST_READER_FOREACH(d, bp) {
		u_int slen = 0;
		struct bpf_filter *filter;

		if (direction == BPF_D_IN) {
			if (d->bd_direction == BPF_D_OUT)
				continue;
		} else { /* BPF_D_OUT */
			if (d->bd_direction == BPF_D_IN)
				continue;
		}

		atomic_inc_ulong(&d->bd_rcount);
		BPF_STATINC(recv);

		filter = d->bd_filter;
		membar_datadep_consumer();
		if (filter != NULL) {
			if (filter->bf_jitcode != NULL)
				slen = filter->bf_jitcode(NULL, &args);
			else
				slen = bpf_filter_ext(NULL, filter->bf_insn,
				    &args);
		}

		if (!slen) {
			continue;
		}
		if (!gottime) {
			gottime = true;
			nanotime(&ts);
		}
		/* Assume catchpacket doesn't sleep */
		catchpacket(d, pkt, pktlen, slen, cpfn, &ts);
	}
	pserialize_read_exit(s);
}

/*
 * Incoming linkage from device drivers, when the head of the packet is in
 * a buffer, and the tail is in an mbuf chain.
 */
static void
_bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m,
	u_int direction)
{
	u_int pktlen;
	struct mbuf mb;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m) + dlen;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	(void)memset(&mb, 0, sizeof(mb));
	mb.m_type = MT_DATA;
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;

	bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, direction);
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
static void
_bpf_mtap(struct bpf_if *bp, struct mbuf *m, u_int direction)
{
	void *(*cpfn)(void *, const void *, size_t);
	u_int pktlen, buflen;
	void *marg;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m);

	if (pktlen == m->m_len) {
		cpfn = (void *)memcpy;
		marg = mtod(m, void *);
		buflen = pktlen;
		KASSERT(buflen != 0);
	} else {
		cpfn = bpf_mcpy;
		marg = m;
		buflen = 0;
	}

	bpf_deliver(bp, cpfn, marg, pktlen, buflen, direction);
}

/*
 * We need to prepend the address family as
 * a four byte field.  Cons up a dummy header
 * to pacify bpf.  This is safe because bpf
 * will only read from the mbuf (i.e., it won't
 * try to free it or keep a pointer to it).
 */
static void
_bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m, u_int direction)
{
	struct mbuf m0;

	m0.m_type = MT_DATA;
	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_nextpkt = NULL;
	m0.m_owner = NULL;
	m0.m_len = 4;
	m0.m_data = (char *)&af;

	_bpf_mtap(bp, &m0, direction);
}

/*
 * Put the SLIP pseudo-"link header" in place.
 * Note this M_PREPEND() should never fail,
 * since we know we always have enough space
 * in the input buffer.
 */
   1722 static void
   1723 _bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m)
   1724 {
   1725 	u_char *hp;
   1726 
   1727 	M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
   1728 	if (*m == NULL)
   1729 		return;
   1730 
   1731 	hp = mtod(*m, u_char *);
   1732 	hp[SLX_DIR] = SLIPDIR_IN;
   1733 	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
   1734 
   1735 	_bpf_mtap(bp, *m, BPF_D_IN);
   1736 
   1737 	m_adj(*m, SLIP_HDRLEN);
   1738 }
   1739 
   1740 /*
   1741  * Put the SLIP pseudo-"link header" in
   1742  * place.  The compressed header is now
   1743  * at the beginning of the mbuf.
   1744  */
   1745 static void
   1746 _bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m)
   1747 {
   1748 	struct mbuf m0;
   1749 	u_char *hp;
   1750 
   1751 	m0.m_type = MT_DATA;
   1752 	m0.m_flags = 0;
   1753 	m0.m_next = m;
   1754 	m0.m_nextpkt = NULL;
   1755 	m0.m_owner = NULL;
   1756 	m0.m_data = m0.m_dat;
   1757 	m0.m_len = SLIP_HDRLEN;
   1758 
   1759 	hp = mtod(&m0, u_char *);
   1760 
   1761 	hp[SLX_DIR] = SLIPDIR_OUT;
   1762 	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
   1763 
   1764 	_bpf_mtap(bp, &m0, BPF_D_OUT);
   1765 	m_freem(m);
   1766 }
   1767 
   1768 static struct mbuf *
   1769 bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m)
   1770 {
   1771 	struct mbuf *dup;
   1772 
   1773 	dup = m_dup(m, 0, M_COPYALL, M_NOWAIT);
   1774 	if (dup == NULL)
   1775 		return NULL;
   1776 
   1777 	if (bp->bif_mbuf_tail != NULL) {
   1778 		bp->bif_mbuf_tail->m_nextpkt = dup;
   1779 	} else {
   1780 		bp->bif_mbuf_head = dup;
   1781 	}
   1782 	bp->bif_mbuf_tail = dup;
   1783 #ifdef BPF_MTAP_SOFTINT_DEBUG
   1784 	log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n",
   1785 	    __func__, dup, bp->bif_ifp->if_xname);
   1786 #endif
   1787 
   1788 	return dup;
   1789 }
   1790 
   1791 static struct mbuf *
   1792 bpf_mbuf_dequeue(struct bpf_if *bp)
   1793 {
   1794 	struct mbuf *m;
   1795 	int s;
   1796 
   1797 	/* XXX NOMPSAFE: assumed running on one CPU */
   1798 	s = splnet();
   1799 	m = bp->bif_mbuf_head;
   1800 	if (m != NULL) {
   1801 		bp->bif_mbuf_head = m->m_nextpkt;
   1802 		m->m_nextpkt = NULL;
   1803 
   1804 		if (bp->bif_mbuf_head == NULL)
   1805 			bp->bif_mbuf_tail = NULL;
   1806 #ifdef BPF_MTAP_SOFTINT_DEBUG
   1807 		log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n",
   1808 		    __func__, m, bp->bif_ifp->if_xname);
   1809 #endif
   1810 	}
   1811 	splx(s);
   1812 
   1813 	return m;
   1814 }
   1815 
   1816 static void
   1817 bpf_mtap_si(void *arg)
   1818 {
   1819 	struct bpf_if *bp = arg;
   1820 	struct mbuf *m;
   1821 
   1822 	while ((m = bpf_mbuf_dequeue(bp)) != NULL) {
   1823 #ifdef BPF_MTAP_SOFTINT_DEBUG
   1824 		log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n",
   1825 		    __func__, m, bp->bif_ifp->if_xname);
   1826 #endif
   1827 		bpf_ops->bpf_mtap(bp, m, BPF_D_IN);
   1828 		m_freem(m);
   1829 	}
   1830 }
   1831 
   1832 static void
   1833 _bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m)
   1834 {
   1835 	struct bpf_if *bp = ifp->if_bpf;
   1836 	struct mbuf *dup;
   1837 
   1838 	KASSERT(cpu_intr_p());
   1839 
   1840 	/* To avoid extra invocations of the softint */
   1841 	if (BPFIF_DLIST_READER_EMPTY(bp))
   1842 		return;
   1843 	KASSERT(bp->bif_si != NULL);
   1844 
   1845 	dup = bpf_mbuf_enqueue(bp, m);
   1846 	if (dup != NULL)
   1847 		softint_schedule(bp->bif_si);
   1848 }
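/*
 * Editorial sketch (hedged): a driver whose receive path runs in
 * hardware interrupt context cannot tap packets directly, because the
 * normal bpf_mtap() path may take adaptive locks.  Under that
 * assumption, a driver would arrange the deferred tap roughly as
 * follows, using the public net/bpf.h wrappers (error handling
 * omitted):
 *
 *	// once, at attach time, after bpf_attach():
 *	bpf_mtap_softint_init(ifp);
 *
 *	// in the hardware interrupt handler, per received mbuf:
 *	if (ifp->if_bpf)
 *		bpf_mtap_softint(ifp, m);
 *
 * _bpf_mtap_softint() above duplicates the mbuf and schedules
 * bpf_mtap_si(), which performs the real tap at SOFTINT_NET priority.
 */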
   1849 
   1850 static int
   1851 bpf_hdrlen(struct bpf_d *d)
   1852 {
   1853 	int hdrlen = d->bd_bif->bif_hdrlen;
   1854 	/*
   1855 	 * Compute the length of the bpf header.  This is not necessarily
   1856 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
   1857 	 * that the network layer header begins on a longword boundary (for
   1858 	 * performance reasons and to alleviate alignment restrictions).
   1859 	 */
   1860 #ifdef _LP64
   1861 	if (d->bd_compat32)
   1862 		return (BPF_WORDALIGN32(hdrlen + SIZEOF_BPF_HDR32) - hdrlen);
   1863 	else
   1864 #endif
   1865 		return (BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen);
   1866 }
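/*
 * Worked example (editorial; the figures are illustrative and depend
 * on the platform's struct sizes): with BPF_ALIGNMENT == sizeof(long)
 * == 8 on LP64, an Ethernet attachment (bif_hdrlen == 14) and a
 * 32-byte struct bpf_hdr would give
 *
 *	BPF_WORDALIGN(14 + 32) - 14 == 48 - 14 == 34
 *
 * so each record carries 34 bytes ahead of the 14-byte link header,
 * leaving the network header at offset 48 -- a longword boundary, as
 * the comment in bpf_hdrlen() requires.
 */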
   1867 
   1868 /*
   1869  * Move the packet data from interface memory (pkt) into the
    1870  * store buffer.  Call the wakeup functions if it's time to wake up
    1871  * a listener (buffer full).  "cpfn" is the routine called to do the
    1872  * actual data transfer: memcpy is passed in to copy contiguous chunks,
   1873  * while bpf_mcpy is passed in to copy mbuf chains.  In the latter case,
   1874  * pkt is really an mbuf.
   1875  */
   1876 static void
   1877 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
   1878     void *(*cpfn)(void *, const void *, size_t), struct timespec *ts)
   1879 {
   1880 	char *h;
   1881 	int totlen, curlen, caplen;
   1882 	int hdrlen = bpf_hdrlen(d);
   1883 	int do_wakeup = 0;
   1884 
   1885 	atomic_inc_ulong(&d->bd_ccount);
   1886 	BPF_STATINC(capt);
   1887 	/*
   1888 	 * Figure out how many bytes to move.  If the packet is
    1889 	 * greater than or equal to the snapshot length, transfer that
   1890 	 * much.  Otherwise, transfer the whole packet (unless
   1891 	 * we hit the buffer size limit).
   1892 	 */
   1893 	totlen = hdrlen + uimin(snaplen, pktlen);
   1894 	if (totlen > d->bd_bufsize)
   1895 		totlen = d->bd_bufsize;
   1896 	/*
   1897 	 * If we adjusted totlen to fit the bufsize, it could be that
   1898 	 * totlen is smaller than hdrlen because of the link layer header.
   1899 	 */
   1900 	caplen = totlen - hdrlen;
   1901 	if (caplen < 0)
   1902 		caplen = 0;
   1903 
   1904 	mutex_enter(d->bd_buf_mtx);
   1905 	/*
   1906 	 * Round up the end of the previous packet to the next longword.
   1907 	 */
   1908 #ifdef _LP64
   1909 	if (d->bd_compat32)
   1910 		curlen = BPF_WORDALIGN32(d->bd_slen);
   1911 	else
   1912 #endif
   1913 		curlen = BPF_WORDALIGN(d->bd_slen);
   1914 	if (curlen + totlen > d->bd_bufsize) {
   1915 		/*
   1916 		 * This packet will overflow the storage buffer.
   1917 		 * Rotate the buffers if we can, then wakeup any
   1918 		 * pending reads.
   1919 		 */
   1920 		if (d->bd_fbuf == NULL) {
   1921 			mutex_exit(d->bd_buf_mtx);
   1922 			/*
   1923 			 * We haven't completed the previous read yet,
   1924 			 * so drop the packet.
   1925 			 */
   1926 			atomic_inc_ulong(&d->bd_dcount);
   1927 			BPF_STATINC(drop);
   1928 			return;
   1929 		}
   1930 		ROTATE_BUFFERS(d);
   1931 		do_wakeup = 1;
   1932 		curlen = 0;
   1933 	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
   1934 		/*
   1935 		 * Immediate mode is set, or the read timeout has
   1936 		 * already expired during a select call.  A packet
   1937 		 * arrived, so the reader should be woken up.
   1938 		 */
   1939 		do_wakeup = 1;
   1940 	}
   1941 
   1942 	/*
   1943 	 * Append the bpf header.
   1944 	 */
   1945 	h = (char *)d->bd_sbuf + curlen;
   1946 #ifdef _LP64
   1947 	if (d->bd_compat32) {
   1948 		struct bpf_hdr32 *hp32;
   1949 
   1950 		hp32 = (struct bpf_hdr32 *)h;
   1951 		hp32->bh_tstamp.tv_sec = ts->tv_sec;
   1952 		hp32->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
   1953 		hp32->bh_datalen = pktlen;
   1954 		hp32->bh_hdrlen = hdrlen;
   1955 		hp32->bh_caplen = caplen;
   1956 	} else
   1957 #endif
   1958 	{
   1959 		struct bpf_hdr *hp;
   1960 
   1961 		hp = (struct bpf_hdr *)h;
   1962 		hp->bh_tstamp.tv_sec = ts->tv_sec;
   1963 		hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
   1964 		hp->bh_datalen = pktlen;
   1965 		hp->bh_hdrlen = hdrlen;
   1966 		hp->bh_caplen = caplen;
   1967 	}
   1968 
   1969 	/*
   1970 	 * Copy the packet data into the store buffer and update its length.
   1971 	 */
   1972 	(*cpfn)(h + hdrlen, pkt, caplen);
   1973 	d->bd_slen = curlen + totlen;
   1974 	mutex_exit(d->bd_buf_mtx);
   1975 
   1976 	/*
    1977 	 * Call bpf_wakeup after bd_slen has been updated so that
    1978 	 * filt_bpfread() reports the new length to kevent(2) waiters.
   1979 	 */
   1980 	if (do_wakeup)
   1981 		bpf_wakeup(d);
   1982 }
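/*
 * Record layout note (editorial): after catchpacket(), the store
 * buffer holds back-to-back records of the form
 *
 *	[struct bpf_hdr][link-layer header][payload, caplen bytes]
 *
 * with the start of each record rounded up to a longword boundary by
 * BPF_WORDALIGN().  Userland consumers such as libpcap walk the buffer
 * using bh_hdrlen and bh_caplen plus the same alignment rule.
 */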
   1983 
   1984 /*
    1985  * Allocate the buffers (store and free) of a descriptor.
   1986  */
   1987 static int
   1988 bpf_allocbufs(struct bpf_d *d)
   1989 {
   1990 
   1991 	d->bd_fbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP);
   1992 	if (!d->bd_fbuf)
   1993 		return (ENOBUFS);
   1994 	d->bd_sbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP);
   1995 	if (!d->bd_sbuf) {
   1996 		kmem_free(d->bd_fbuf, d->bd_bufsize);
   1997 		return (ENOBUFS);
   1998 	}
   1999 	d->bd_slen = 0;
   2000 	d->bd_hlen = 0;
   2001 	return (0);
   2002 }
   2003 
   2004 static void
   2005 bpf_free_filter(struct bpf_filter *filter)
   2006 {
   2007 
   2008 	KASSERT(filter != NULL);
   2009 	KASSERT(filter->bf_insn != NULL);
   2010 
   2011 	kmem_free(filter->bf_insn, filter->bf_size);
   2012 	if (filter->bf_jitcode != NULL)
   2013 		bpf_jit_freecode(filter->bf_jitcode);
   2014 	kmem_free(filter, sizeof(*filter));
   2015 }
   2016 
   2017 /*
   2018  * Free buffers currently in use by a descriptor.
   2019  * Called on close.
   2020  */
   2021 static void
   2022 bpf_freed(struct bpf_d *d)
   2023 {
   2024 	/*
   2025 	 * We don't need to lock out interrupts since this descriptor has
    2026 	 * been detached from its interface and hasn't yet been marked
    2027 	 * free.
   2028 	 */
   2029 	if (d->bd_sbuf != NULL) {
   2030 		kmem_free(d->bd_sbuf, d->bd_bufsize);
   2031 		if (d->bd_hbuf != NULL)
   2032 			kmem_free(d->bd_hbuf, d->bd_bufsize);
   2033 		if (d->bd_fbuf != NULL)
   2034 			kmem_free(d->bd_fbuf, d->bd_bufsize);
   2035 	}
   2036 	if (d->bd_filter != NULL) {
   2037 		bpf_free_filter(d->bd_filter);
   2038 		d->bd_filter = NULL;
   2039 	}
   2040 	d->bd_jitcode = NULL;
   2041 }
   2042 
   2043 /*
   2044  * Attach an interface to bpf.  dlt is the link layer type;
   2045  * hdrlen is the fixed size of the link header for the specified dlt
   2046  * (variable length headers not yet supported).
   2047  */
   2048 static void
   2049 _bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
   2050 {
   2051 	struct bpf_if *bp;
   2052 	bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP);
   2053 	if (bp == NULL)
   2054 		panic("bpfattach");
   2055 
   2056 	mutex_enter(&bpf_mtx);
   2057 	bp->bif_driverp = driverp;
   2058 	bp->bif_ifp = ifp;
   2059 	bp->bif_dlt = dlt;
   2060 	bp->bif_si = NULL;
   2061 	BPF_IFLIST_ENTRY_INIT(bp);
   2062 	PSLIST_INIT(&bp->bif_dlist_head);
   2063 	psref_target_init(&bp->bif_psref, bpf_psref_class);
   2064 
   2065 	BPF_IFLIST_WRITER_INSERT_HEAD(bp);
   2066 
   2067 	*bp->bif_driverp = NULL;
   2068 
   2069 	bp->bif_hdrlen = hdrlen;
   2070 	mutex_exit(&bpf_mtx);
   2071 #if 0
   2072 	printf("bpf: %s attached\n", ifp->if_xname);
   2073 #endif
   2074 }
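/*
 * Usage note (editorial, hedged): drivers normally reach this through
 * the public bpf_attach() wrapper at interface attach time, e.g. for
 * Ethernet:
 *
 *	bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header));
 *
 * which records the fixed 14-byte link header that bpf_hdrlen() later
 * uses to pad records for alignment.
 */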
   2075 
   2076 static void
   2077 _bpf_mtap_softint_init(struct ifnet *ifp)
   2078 {
   2079 	struct bpf_if *bp;
   2080 
   2081 	mutex_enter(&bpf_mtx);
   2082 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2083 		if (bp->bif_ifp != ifp)
   2084 			continue;
   2085 
   2086 		bp->bif_mbuf_head = NULL;
   2087 		bp->bif_mbuf_tail = NULL;
   2088 		bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp);
   2089 		if (bp->bif_si == NULL)
   2090 			panic("%s: softint_establish() failed", __func__);
   2091 		break;
   2092 	}
   2093 	mutex_exit(&bpf_mtx);
   2094 
   2095 	if (bp == NULL)
   2096 		panic("%s: no bpf_if found for %s", __func__, ifp->if_xname);
   2097 }
   2098 
   2099 /*
   2100  * Remove an interface from bpf.
   2101  */
   2102 static void
   2103 _bpfdetach(struct ifnet *ifp)
   2104 {
   2105 	struct bpf_if *bp;
   2106 	struct bpf_d *d;
   2107 	int s;
   2108 
   2109 	mutex_enter(&bpf_mtx);
   2110 	/* Nuke the vnodes for any open instances */
   2111   again_d:
   2112 	BPF_DLIST_WRITER_FOREACH(d) {
   2113 		mutex_enter(d->bd_mtx);
   2114 		if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
   2115 			/*
    2116 			 * Detach the descriptor from the interface now.
    2117 			 * It will be freed later by the close routine.
   2118 			 */
   2119 			d->bd_promisc = 0;	/* we can't touch device. */
   2120 			bpf_detachd(d);
   2121 			mutex_exit(d->bd_mtx);
   2122 			goto again_d;
   2123 		}
   2124 		mutex_exit(d->bd_mtx);
   2125 	}
   2126 
   2127   again:
   2128 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2129 		if (bp->bif_ifp == ifp) {
   2130 			BPF_IFLIST_WRITER_REMOVE(bp);
   2131 
   2132 			pserialize_perform(bpf_psz);
   2133 			psref_target_destroy(&bp->bif_psref, bpf_psref_class);
   2134 
   2135 			BPF_IFLIST_ENTRY_DESTROY(bp);
   2136 			if (bp->bif_si != NULL) {
   2137 				/* XXX NOMPSAFE: assumed running on one CPU */
   2138 				s = splnet();
   2139 				while (bp->bif_mbuf_head != NULL) {
   2140 					struct mbuf *m = bp->bif_mbuf_head;
   2141 					bp->bif_mbuf_head = m->m_nextpkt;
   2142 					m_freem(m);
   2143 				}
   2144 				splx(s);
   2145 				softint_disestablish(bp->bif_si);
   2146 			}
   2147 			kmem_free(bp, sizeof(*bp));
   2148 			goto again;
   2149 		}
   2150 	}
   2151 	mutex_exit(&bpf_mtx);
   2152 }
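/*
 * Teardown ordering note (editorial): the sequence above follows the
 * usual pserialize/psref destruction protocol: unlink the bpf_if so
 * new readers cannot find it, pserialize_perform() to wait out readers
 * still inside a read section, psref_target_destroy() to wait for
 * holders of acquired references, and only then free the object.
 * Reordering these steps could free memory a reader is still using.
 */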
   2153 
   2154 /*
    2155  * Change the data link type of an interface.
   2156  */
   2157 static void
   2158 _bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
   2159 {
   2160 	struct bpf_if *bp;
   2161 
   2162 	mutex_enter(&bpf_mtx);
   2163 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2164 		if (bp->bif_driverp == &ifp->if_bpf)
   2165 			break;
   2166 	}
   2167 	if (bp == NULL)
   2168 		panic("bpf_change_type");
   2169 
   2170 	bp->bif_dlt = dlt;
   2171 
   2172 	bp->bif_hdrlen = hdrlen;
   2173 	mutex_exit(&bpf_mtx);
   2174 }
   2175 
   2176 /*
    2177  * Get the list of available data link types for the interface.
   2178  */
   2179 static int
   2180 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
   2181 {
   2182 	int n, error;
   2183 	struct ifnet *ifp;
   2184 	struct bpf_if *bp;
   2185 	int s, bound;
   2186 
   2187 	KASSERT(mutex_owned(d->bd_mtx));
   2188 
   2189 	ifp = d->bd_bif->bif_ifp;
   2190 	n = 0;
   2191 	error = 0;
   2192 
   2193 	bound = curlwp_bind();
   2194 	s = pserialize_read_enter();
   2195 	BPF_IFLIST_READER_FOREACH(bp) {
   2196 		if (bp->bif_ifp != ifp)
   2197 			continue;
   2198 		if (bfl->bfl_list != NULL) {
   2199 			struct psref psref;
   2200 
    2201 			if (n >= bfl->bfl_len) {
    2202 				pserialize_read_exit(s);
         			curlwp_bindx(bound);	/* don't leak the LWP bind */
    2203 				return ENOMEM;
    2204 			}
   2205 
   2206 			bpf_if_acquire(bp, &psref);
   2207 			pserialize_read_exit(s);
   2208 
   2209 			error = copyout(&bp->bif_dlt,
   2210 			    bfl->bfl_list + n, sizeof(u_int));
   2211 
   2212 			s = pserialize_read_enter();
   2213 			bpf_if_release(bp, &psref);
   2214 		}
   2215 		n++;
   2216 	}
   2217 	pserialize_read_exit(s);
   2218 	curlwp_bindx(bound);
   2219 
   2220 	bfl->bfl_len = n;
   2221 	return error;
   2222 }
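/*
 * Userland view (editorial, hedged): this routine backs the
 * BIOCGDLTLIST ioctl.  The customary two-call pattern -- probe the
 * count with bfl_list NULL, then fetch the array -- might look like
 * this sketch (error handling omitted):
 *
 *	struct bpf_dltlist bfl;
 *
 *	memset(&bfl, 0, sizeof(bfl));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// count only
 *	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// fills the array
 *	ioctl(fd, BIOCSDLT, &bfl.bfl_list[0]);	// switch, via bpf_setdlt()
 */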
   2223 
   2224 /*
   2225  * Set the data link type of a BPF instance.
   2226  */
   2227 static int
   2228 bpf_setdlt(struct bpf_d *d, u_int dlt)
   2229 {
   2230 	int error, opromisc;
   2231 	struct ifnet *ifp;
   2232 	struct bpf_if *bp;
   2233 
   2234 	KASSERT(mutex_owned(&bpf_mtx));
   2235 	KASSERT(mutex_owned(d->bd_mtx));
   2236 
   2237 	if (d->bd_bif->bif_dlt == dlt)
   2238 		return 0;
   2239 	ifp = d->bd_bif->bif_ifp;
   2240 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2241 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
   2242 			break;
   2243 	}
   2244 	if (bp == NULL)
   2245 		return EINVAL;
   2246 	opromisc = d->bd_promisc;
   2247 	bpf_detachd(d);
   2248 	BPFIF_DLIST_ENTRY_INIT(d);
   2249 	bpf_attachd(d, bp);
   2250 	reset_d(d);
   2251 	if (opromisc) {
   2252 		KERNEL_LOCK_UNLESS_NET_MPSAFE();
   2253 		error = ifpromisc(bp->bif_ifp, 1);
   2254 		KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
   2255 		if (error)
   2256 			printf("%s: bpf_setdlt: ifpromisc failed (%d)\n",
   2257 			    bp->bif_ifp->if_xname, error);
   2258 		else
   2259 			d->bd_promisc = 1;
   2260 	}
   2261 	return 0;
   2262 }
   2263 
   2264 static int
   2265 sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS)
   2266 {
   2267 	int newsize, error;
   2268 	struct sysctlnode node;
   2269 
   2270 	node = *rnode;
   2271 	node.sysctl_data = &newsize;
   2272 	newsize = bpf_maxbufsize;
   2273 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2274 	if (error || newp == NULL)
   2275 		return (error);
   2276 
   2277 	if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE)
   2278 		return (EINVAL);
   2279 
   2280 	bpf_maxbufsize = newsize;
   2281 
   2282 	return (0);
   2283 }
   2284 
   2285 #if defined(MODULAR) || defined(BPFJIT)
   2286 static int
   2287 sysctl_net_bpf_jit(SYSCTLFN_ARGS)
   2288 {
   2289 	bool newval;
   2290 	int error;
   2291 	struct sysctlnode node;
   2292 
   2293 	node = *rnode;
   2294 	node.sysctl_data = &newval;
   2295 	newval = bpf_jit;
   2296 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2297 	if (error != 0 || newp == NULL)
   2298 		return error;
   2299 
   2300 	bpf_jit = newval;
   2301 
   2302 	/*
    2303 	 * Do a full sync to publish the new bpf_jit value and to
    2304 	 * pick up the current bpfjit_module_ops.bj_generate_code pointer.
   2305 	 */
   2306 	membar_sync();
   2307 
   2308 	if (newval && bpfjit_module_ops.bj_generate_code == NULL) {
   2309 		printf("JIT compilation is postponed "
   2310 		    "until after bpfjit module is loaded\n");
   2311 	}
   2312 
   2313 	return 0;
   2314 }
   2315 #endif
   2316 
   2317 static int
   2318 sysctl_net_bpf_peers(SYSCTLFN_ARGS)
   2319 {
   2320 	int    error, elem_count;
   2321 	struct bpf_d	 *dp;
   2322 	struct bpf_d_ext  dpe;
   2323 	size_t len, needed, elem_size, out_size;
   2324 	char   *sp;
   2325 
   2326 	if (namelen == 1 && name[0] == CTL_QUERY)
   2327 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
   2328 
   2329 	if (namelen != 2)
   2330 		return (EINVAL);
   2331 
    2332 	/* The BPF peer list is privileged information. */
   2333 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE,
   2334 	    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL);
   2335 	if (error)
   2336 		return (EPERM);
   2337 
   2338 	len = (oldp != NULL) ? *oldlenp : 0;
   2339 	sp = oldp;
   2340 	elem_size = name[0];
   2341 	elem_count = name[1];
   2342 	out_size = MIN(sizeof(dpe), elem_size);
   2343 	needed = 0;
   2344 
   2345 	if (elem_size < 1 || elem_count < 0)
   2346 		return (EINVAL);
   2347 
   2348 	mutex_enter(&bpf_mtx);
   2349 	BPF_DLIST_WRITER_FOREACH(dp) {
   2350 		if (len >= elem_size && elem_count > 0) {
   2351 #define BPF_EXT(field)	dpe.bde_ ## field = dp->bd_ ## field
   2352 			BPF_EXT(bufsize);
   2353 			BPF_EXT(promisc);
   2354 			BPF_EXT(state);
   2355 			BPF_EXT(immediate);
   2356 			BPF_EXT(hdrcmplt);
   2357 			BPF_EXT(direction);
   2358 			BPF_EXT(pid);
   2359 			BPF_EXT(rcount);
   2360 			BPF_EXT(dcount);
   2361 			BPF_EXT(ccount);
   2362 #undef BPF_EXT
   2363 			mutex_enter(dp->bd_mtx);
   2364 			if (dp->bd_bif)
   2365 				(void)strlcpy(dpe.bde_ifname,
   2366 				    dp->bd_bif->bif_ifp->if_xname,
   2367 				    IFNAMSIZ - 1);
   2368 			else
   2369 				dpe.bde_ifname[0] = '\0';
   2370 			mutex_exit(dp->bd_mtx);
   2371 
   2372 			error = copyout(&dpe, sp, out_size);
   2373 			if (error)
   2374 				break;
   2375 			sp += elem_size;
   2376 			len -= elem_size;
   2377 		}
   2378 		needed += elem_size;
   2379 		if (elem_count > 0 && elem_count != INT_MAX)
   2380 			elem_count--;
   2381 	}
   2382 	mutex_exit(&bpf_mtx);
   2383 
   2384 	*oldlenp = needed;
   2385 
   2386 	return (error);
   2387 }
   2388 
   2389 static void
   2390 bpf_stats(void *p, void *arg, struct cpu_info *ci __unused)
   2391 {
   2392 	struct bpf_stat *const stats = p;
   2393 	struct bpf_stat *sum = arg;
   2394 
   2395 	sum->bs_recv += stats->bs_recv;
   2396 	sum->bs_drop += stats->bs_drop;
   2397 	sum->bs_capt += stats->bs_capt;
   2398 }
   2399 
   2400 static int
   2401 bpf_sysctl_gstats_handler(SYSCTLFN_ARGS)
   2402 {
   2403 	struct sysctlnode node;
   2404 	int error;
   2405 	struct bpf_stat sum;
   2406 
   2407 	memset(&sum, 0, sizeof(sum));
   2408 	node = *rnode;
   2409 
   2410 	percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum);
   2411 
   2412 	node.sysctl_data = &sum;
   2413 	node.sysctl_size = sizeof(sum);
   2414 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2415 	if (error != 0 || newp == NULL)
   2416 		return error;
   2417 
   2418 	return 0;
   2419 }
   2420 
   2421 static struct sysctllog *bpf_sysctllog;
   2422 static void
   2423 sysctl_net_bpf_setup(void)
   2424 {
   2425 	const struct sysctlnode *node;
   2426 
   2427 	node = NULL;
   2428 	sysctl_createv(&bpf_sysctllog, 0, NULL, &node,
   2429 		       CTLFLAG_PERMANENT,
   2430 		       CTLTYPE_NODE, "bpf",
   2431 		       SYSCTL_DESCR("BPF options"),
   2432 		       NULL, 0, NULL, 0,
   2433 		       CTL_NET, CTL_CREATE, CTL_EOL);
   2434 	if (node != NULL) {
   2435 #if defined(MODULAR) || defined(BPFJIT)
   2436 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2437 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2438 			CTLTYPE_BOOL, "jit",
   2439 			SYSCTL_DESCR("Toggle Just-In-Time compilation"),
   2440 			sysctl_net_bpf_jit, 0, &bpf_jit, 0,
   2441 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2442 #endif
   2443 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2444 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2445 			CTLTYPE_INT, "maxbufsize",
   2446 			SYSCTL_DESCR("Maximum size for data capture buffer"),
   2447 			sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0,
   2448 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2449 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2450 			CTLFLAG_PERMANENT,
   2451 			CTLTYPE_STRUCT, "stats",
   2452 			SYSCTL_DESCR("BPF stats"),
   2453 			bpf_sysctl_gstats_handler, 0, NULL, 0,
   2454 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2455 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2456 			CTLFLAG_PERMANENT,
   2457 			CTLTYPE_STRUCT, "peers",
   2458 			SYSCTL_DESCR("BPF peers"),
   2459 			sysctl_net_bpf_peers, 0, NULL, 0,
   2460 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2461 	}
   2462 
   2463 }
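/*
 * Usage note (editorial, hedged): the nodes created above appear as
 * net.bpf.* and can be inspected or tuned with sysctl(8), e.g.:
 *
 *	# sysctl net.bpf.maxbufsize
 *	# sysctl -w net.bpf.maxbufsize=1048576
 *	# sysctl -w net.bpf.jit=1	(where bpfjit support is built)
 *
 * "stats" and "peers" are read-only CTLTYPE_STRUCT nodes intended for
 * programmatic consumption by monitoring tools.
 */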
   2464 
   2465 struct bpf_ops bpf_ops_kernel = {
   2466 	.bpf_attach =		_bpfattach,
   2467 	.bpf_detach =		_bpfdetach,
   2468 	.bpf_change_type =	_bpf_change_type,
   2469 
   2470 	.bpf_mtap =		_bpf_mtap,
   2471 	.bpf_mtap2 =		_bpf_mtap2,
   2472 	.bpf_mtap_af =		_bpf_mtap_af,
   2473 	.bpf_mtap_sl_in =	_bpf_mtap_sl_in,
   2474 	.bpf_mtap_sl_out =	_bpf_mtap_sl_out,
   2475 
   2476 	.bpf_mtap_softint =		_bpf_mtap_softint,
   2477 	.bpf_mtap_softint_init =	_bpf_mtap_softint_init,
   2478 };
   2479 
   2480 MODULE(MODULE_CLASS_DRIVER, bpf, "bpf_filter");
   2481 
   2482 static int
   2483 bpf_modcmd(modcmd_t cmd, void *arg)
   2484 {
   2485 #ifdef _MODULE
   2486 	devmajor_t bmajor, cmajor;
   2487 #endif
   2488 	int error = 0;
   2489 
   2490 	switch (cmd) {
   2491 	case MODULE_CMD_INIT:
   2492 		bpf_init();
   2493 #ifdef _MODULE
   2494 		bmajor = cmajor = NODEVMAJOR;
   2495 		error = devsw_attach("bpf", NULL, &bmajor,
   2496 		    &bpf_cdevsw, &cmajor);
   2497 		if (error)
   2498 			break;
   2499 #endif
   2500 
   2501 		bpf_ops_handover_enter(&bpf_ops_kernel);
   2502 		atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
   2503 		bpf_ops_handover_exit();
   2504 		sysctl_net_bpf_setup();
   2505 		break;
   2506 
   2507 	case MODULE_CMD_FINI:
   2508 		/*
   2509 		 * While there is no reference counting for bpf callers,
   2510 		 * unload could at least in theory be done similarly to
   2511 		 * system call disestablishment.  This should even be
   2512 		 * a little simpler:
   2513 		 *
   2514 		 * 1) replace op vector with stubs
   2515 		 * 2) post update to all cpus with xc
   2516 		 * 3) check that nobody is in bpf anymore
   2517 		 *    (it's doubtful we'd want something like l_sysent,
   2518 		 *     but we could do something like *signed* percpu
   2519 		 *     counters.  if the sum is 0, we're good).
   2520 		 * 4) if fail, unroll changes
    2521 		 *     counters.  If the sum is 0, we're good).
    2522 		 * 4) if that fails, unroll the changes
    2523 		 *
    2524 		 * NOTE: the change won't be atomic to the outside.  Some
    2525 		 * packets may not be captured even if the unload is
    2526 		 * not successful.  I think packet capture not working
   2527 		 */
   2528 		error = EOPNOTSUPP;
   2529 		/* insert sysctl teardown */
   2530 		break;
   2531 
   2532 	default:
   2533 		error = ENOTTY;
   2534 		break;
   2535 	}
   2536 
   2537 	return error;
   2538 }
   2539