      1 /*	$NetBSD: bpf.c,v 1.216.6.5 2018/02/05 14:18:00 martin Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1990, 1991, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from the Stanford/CMU enet packet filter,
      8  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
      9  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
     10  * Berkeley Laboratory.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
     37  * static char rcsid[] =
     38  * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
     39  */
     40 
     41 #include <sys/cdefs.h>
     42 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.5 2018/02/05 14:18:00 martin Exp $");
     43 
     44 #if defined(_KERNEL_OPT)
     45 #include "opt_bpf.h"
     46 #include "sl.h"
     47 #include "strip.h"
     48 #include "opt_net_mpsafe.h"
     49 #endif
     50 
     51 #include <sys/param.h>
     52 #include <sys/systm.h>
     53 #include <sys/mbuf.h>
     54 #include <sys/buf.h>
     55 #include <sys/time.h>
     56 #include <sys/proc.h>
     57 #include <sys/ioctl.h>
     58 #include <sys/conf.h>
     59 #include <sys/vnode.h>
     60 #include <sys/queue.h>
     61 #include <sys/stat.h>
     62 #include <sys/module.h>
     63 #include <sys/atomic.h>
     64 #include <sys/cpu.h>
     65 
     66 #include <sys/file.h>
     67 #include <sys/filedesc.h>
     68 #include <sys/tty.h>
     69 #include <sys/uio.h>
     70 
     71 #include <sys/protosw.h>
     72 #include <sys/socket.h>
     73 #include <sys/errno.h>
     74 #include <sys/kernel.h>
     75 #include <sys/poll.h>
     76 #include <sys/sysctl.h>
     77 #include <sys/kauth.h>
     78 #include <sys/syslog.h>
     79 #include <sys/percpu.h>
     80 #include <sys/pserialize.h>
     81 #include <sys/lwp.h>
     82 
     83 #include <net/if.h>
     84 #include <net/slip.h>
     85 
     86 #include <net/bpf.h>
     87 #include <net/bpfdesc.h>
     88 #include <net/bpfjit.h>
     89 
     90 #include <net/if_arc.h>
     91 #include <net/if_ether.h>
     92 
     93 #include <netinet/in.h>
     94 #include <netinet/if_inarp.h>
     95 
     96 
     97 #include <compat/sys/sockio.h>
     98 
     99 #ifndef BPF_BUFSIZE
    100 /*
    101  * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet
    102  * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k).
    103  */
    104 # define BPF_BUFSIZE 32768
    105 #endif
    106 
    107 #define PRINET  26			/* interruptible */
    108 
    109 /*
    110  * The default read buffer size and the limit for BIOCSBLEN are sysctl'able.
    111  * XXX the default values should be computed dynamically based
    112  * on available memory size and available mbuf clusters.
    113  */
    114 static int bpf_bufsize = BPF_BUFSIZE;
    115 static int bpf_maxbufsize = BPF_DFLTBUFSIZE;	/* XXX set dynamically, see above */
    116 static bool bpf_jit = false;
    117 
    118 struct bpfjit_ops bpfjit_module_ops = {
    119 	.bj_generate_code = NULL,
    120 	.bj_free_code = NULL
    121 };
    122 
    123 /*
    124  * Global BPF statistics returned by net.bpf.stats sysctl.
    125  */
    126 static struct percpu	*bpf_gstats_percpu; /* struct bpf_stat */
    127 
    128 #define BPF_STATINC(id)					\
    129 	{						\
    130 		struct bpf_stat *__stats =		\
    131 		    percpu_getref(bpf_gstats_percpu);	\
    132 		__stats->bs_##id++;			\
    133 		percpu_putref(bpf_gstats_percpu);	\
    134 	}
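
        /*
         * Illustrative use, mirroring the real call sites in bpf_deliver()
         * and catchpacket() below: the increment is bracketed by
         * percpu_getref()/percpu_putref(), so each CPU bumps its own
         * counter and the packet-tapping fast path takes no global lock.
         *
         *	BPF_STATINC(recv);
         *	BPF_STATINC(drop);
         */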
    135 
    136 /*
    137  * Locking notes:
    138  * - bpf_mtx (adaptive mutex) protects:
    139  *   - Global lists: bpf_iflist and bpf_dlist
    140  *   - struct bpf_if
    141  *   - bpf_close
    142  *   - bpf_psz (pserialize)
    143  * - struct bpf_d has two mutexes:
    144  *   - bd_buf_mtx (spin mutex) protects the buffers that can be accessed
    145  *     on packet tapping
    146  *   - bd_mtx (adaptive mutex) protects member variables other than the buffers
    147  * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx
    148  * - struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is
    149  *   never freed because struct bpf_d is only freed in bpf_close and
    150  *   bpf_close is never called while bpf_read or bpf_write is executing
    151  * - A filter that is assigned to bpf_d can be replaced with another filter
    152  *   while tapping packets, so it needs to be done atomically
    153  * - struct bpf_d is iterated on bpf_dlist with psz
    154  * - struct bpf_if is iterated on bpf_iflist with psz or psref
    155  */
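        /*
         * A minimal sketch of the lock ordering documented above
         * (illustrative only; most code paths below take just the subset
         * of locks they need):
         *
         *	mutex_enter(&bpf_mtx);
         *	mutex_enter(d->bd_mtx);
         *	mutex_enter(d->bd_buf_mtx);
         *	... touch global lists, descriptor state, then buffers ...
         *	mutex_exit(d->bd_buf_mtx);
         *	mutex_exit(d->bd_mtx);
         *	mutex_exit(&bpf_mtx);
         */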
    156 /*
    157  * Use a mutex to avoid a race condition between gathering the stats/peers
    158  * and opening/closing the device.
    159  */
    160 static kmutex_t bpf_mtx;
    161 
    162 static struct psref_class	*bpf_psref_class __read_mostly;
    163 static pserialize_t		bpf_psz;
    164 
    165 static inline void
    166 bpf_if_acquire(struct bpf_if *bp, struct psref *psref)
    167 {
    168 
    169 	psref_acquire(psref, &bp->bif_psref, bpf_psref_class);
    170 }
    171 
    172 static inline void
    173 bpf_if_release(struct bpf_if *bp, struct psref *psref)
    174 {
    175 
    176 	psref_release(psref, &bp->bif_psref, bpf_psref_class);
    177 }
    178 
    179 /*
    180  *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
    181  *  bpf_dtab holds the descriptors, indexed by minor device #
    182  */
    183 static struct pslist_head bpf_iflist;
    184 static struct pslist_head bpf_dlist;
    185 
    186 /* Macros for bpf_d on bpf_dlist */
    187 #define BPF_DLIST_WRITER_INSERT_HEAD(__d)				\
    188 	PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry)
    189 #define BPF_DLIST_READER_FOREACH(__d)					\
    190 	PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d,		\
    191 	                      bd_bpf_dlist_entry)
    192 #define BPF_DLIST_WRITER_FOREACH(__d)					\
    193 	PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d,		\
    194 	                      bd_bpf_dlist_entry)
    195 #define BPF_DLIST_ENTRY_INIT(__d)					\
    196 	PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry)
    197 #define BPF_DLIST_WRITER_REMOVE(__d)					\
    198 	PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry)
    199 #define BPF_DLIST_ENTRY_DESTROY(__d)					\
    200 	PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry)
    201 
    202 /* Macros for bpf_if on bpf_iflist */
    203 #define BPF_IFLIST_WRITER_INSERT_HEAD(__bp)				\
    204 	PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry)
    205 #define BPF_IFLIST_READER_FOREACH(__bp)					\
    206 	PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if,	\
    207 	                      bif_iflist_entry)
    208 #define BPF_IFLIST_WRITER_FOREACH(__bp)					\
    209 	PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if,	\
    210 	                      bif_iflist_entry)
    211 #define BPF_IFLIST_WRITER_REMOVE(__bp)					\
    212 	PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry)
    213 #define BPF_IFLIST_ENTRY_INIT(__bp)					\
    214 	PSLIST_ENTRY_INIT((__bp), bif_iflist_entry)
    215 #define BPF_IFLIST_ENTRY_DESTROY(__bp)					\
    216 	PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry)
    217 
    218 /* Macros for bpf_d on bpf_if#bif_dlist_pslist */
    219 #define BPFIF_DLIST_READER_FOREACH(__d, __bp)				\
    220 	PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \
    221 	                      bd_bif_dlist_entry)
    222 #define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d)			\
    223 	PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d),	\
    224 	                          bd_bif_dlist_entry)
    225 #define BPFIF_DLIST_WRITER_REMOVE(__d)					\
    226 	PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry)
    227 #define BPFIF_DLIST_ENTRY_INIT(__d)					\
    228 	PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry)
    229 #define	BPFIF_DLIST_READER_EMPTY(__bp)					\
    230 	(PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d,	\
    231 	                     bd_bif_dlist_entry) == NULL)
    232 #define	BPFIF_DLIST_WRITER_EMPTY(__bp)					\
    233 	(PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d,	\
    234 	                     bd_bif_dlist_entry) == NULL)
    235 #define BPFIF_DLIST_ENTRY_DESTROY(__d)					\
    236 	PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry)
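
        /*
         * Reader-side sketch for these lists (illustrative only; it mirrors
         * the way bpf_deliver() below walks bif_dlist): readers run under
         * pserialize, while writers hold bpf_mtx and call
         * pserialize_perform() before freeing anything a reader might still
         * be looking at.
         *
         *	int s = pserialize_read_enter();
         *	BPF_DLIST_READER_FOREACH(d) {
         *		... read-only inspection of *d ...
         *	}
         *	pserialize_read_exit(s);
         */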
    237 
    238 static int	bpf_allocbufs(struct bpf_d *);
    239 static void	bpf_deliver(struct bpf_if *,
    240 		            void *(*cpfn)(void *, const void *, size_t),
    241 		            void *, u_int, u_int, const bool);
    242 static void	bpf_freed(struct bpf_d *);
    243 static void	bpf_free_filter(struct bpf_filter *);
    244 static void	bpf_ifname(struct ifnet *, struct ifreq *);
    245 static void	*bpf_mcpy(void *, const void *, size_t);
    246 static int	bpf_movein(struct uio *, int, uint64_t,
    247 			        struct mbuf **, struct sockaddr *);
    248 static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
    249 static void	bpf_detachd(struct bpf_d *);
    250 static int	bpf_setif(struct bpf_d *, struct ifreq *);
    251 static int	bpf_setf(struct bpf_d *, struct bpf_program *);
    252 static void	bpf_timed_out(void *);
    253 static inline void
    254 		bpf_wakeup(struct bpf_d *);
    255 static int	bpf_hdrlen(struct bpf_d *);
    256 static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
    257     void *(*)(void *, const void *, size_t), struct timespec *);
    258 static void	reset_d(struct bpf_d *);
    259 static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
    260 static int	bpf_setdlt(struct bpf_d *, u_int);
    261 
    262 static int	bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t,
    263     int);
    264 static int	bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t,
    265     int);
    266 static int	bpf_ioctl(struct file *, u_long, void *);
    267 static int	bpf_poll(struct file *, int);
    268 static int	bpf_stat(struct file *, struct stat *);
    269 static int	bpf_close(struct file *);
    270 static int	bpf_kqfilter(struct file *, struct knote *);
    271 
    272 static const struct fileops bpf_fileops = {
    273 	.fo_read = bpf_read,
    274 	.fo_write = bpf_write,
    275 	.fo_ioctl = bpf_ioctl,
    276 	.fo_fcntl = fnullop_fcntl,
    277 	.fo_poll = bpf_poll,
    278 	.fo_stat = bpf_stat,
    279 	.fo_close = bpf_close,
    280 	.fo_kqfilter = bpf_kqfilter,
    281 	.fo_restart = fnullop_restart,
    282 };
    283 
    284 dev_type_open(bpfopen);
    285 
    286 const struct cdevsw bpf_cdevsw = {
    287 	.d_open = bpfopen,
    288 	.d_close = noclose,
    289 	.d_read = noread,
    290 	.d_write = nowrite,
    291 	.d_ioctl = noioctl,
    292 	.d_stop = nostop,
    293 	.d_tty = notty,
    294 	.d_poll = nopoll,
    295 	.d_mmap = nommap,
    296 	.d_kqfilter = nokqfilter,
    297 	.d_discard = nodiscard,
    298 	.d_flag = D_OTHER | D_MPSAFE
    299 };
    300 
    301 bpfjit_func_t
    302 bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size)
    303 {
    304 
    305 	membar_consumer();
    306 	if (bpfjit_module_ops.bj_generate_code != NULL) {
    307 		return bpfjit_module_ops.bj_generate_code(bc, code, size);
    308 	}
    309 	return NULL;
    310 }
    311 
    312 void
    313 bpf_jit_freecode(bpfjit_func_t jcode)
    314 {
    315 	KASSERT(bpfjit_module_ops.bj_free_code != NULL);
    316 	bpfjit_module_ops.bj_free_code(jcode);
    317 }
    318 
    319 static int
    320 bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp,
    321 	   struct sockaddr *sockp)
    322 {
    323 	struct mbuf *m;
    324 	int error;
    325 	size_t len;
    326 	size_t hlen;
    327 	size_t align;
    328 
    329 	/*
    330 	 * Build a sockaddr based on the data link layer type.
    331 	 * We do this at this level because the ethernet header
    332 	 * is copied directly into the data field of the sockaddr.
    333 	 * In the case of SLIP, there is no header and the packet
    334 	 * is forwarded as is.
    335 	 * Also, we are careful to leave room at the front of the mbuf
    336 	 * for the link level header.
    337 	 */
    338 	switch (linktype) {
    339 
    340 	case DLT_SLIP:
    341 		sockp->sa_family = AF_INET;
    342 		hlen = 0;
    343 		align = 0;
    344 		break;
    345 
    346 	case DLT_PPP:
    347 		sockp->sa_family = AF_UNSPEC;
    348 		hlen = 0;
    349 		align = 0;
    350 		break;
    351 
    352 	case DLT_EN10MB:
    353 		sockp->sa_family = AF_UNSPEC;
    354 		/* XXX Would MAXLINKHDR be better? */
    355  		/* 6(dst)+6(src)+2(type) */
    356 		hlen = sizeof(struct ether_header);
    357 		align = 2;
    358 		break;
    359 
    360 	case DLT_ARCNET:
    361 		sockp->sa_family = AF_UNSPEC;
    362 		hlen = ARC_HDRLEN;
    363 		align = 5;
    364 		break;
    365 
    366 	case DLT_FDDI:
    367 		sockp->sa_family = AF_LINK;
    368 		/* XXX 4(FORMAC)+6(dst)+6(src) */
    369 		hlen = 16;
    370 		align = 0;
    371 		break;
    372 
    373 	case DLT_ECONET:
    374 		sockp->sa_family = AF_UNSPEC;
    375 		hlen = 6;
    376 		align = 2;
    377 		break;
    378 
    379 	case DLT_NULL:
    380 		sockp->sa_family = AF_UNSPEC;
    381 		hlen = 0;
    382 		align = 0;
    383 		break;
    384 
    385 	default:
    386 		return (EIO);
    387 	}
    388 
    389 	len = uio->uio_resid;
    390 	/*
    391 	 * If the packet length, less the link-level header, exceeds the
    392 	 * interface MTU, return an error.
    393 	 */
    394 	if (len - hlen > mtu)
    395 		return (EMSGSIZE);
    396 
    397 	/*
    398 	 * XXX Avoid complicated buffer chaining ---
    399 	 * bail if it won't fit in a single mbuf.
    400 	 * (Take into account possible alignment bytes)
    401 	 */
    402 	if (len + align > MCLBYTES)
    403 		return (EIO);
    404 
    405 	m = m_gethdr(M_WAIT, MT_DATA);
    406 	m_reset_rcvif(m);
    407 	m->m_pkthdr.len = (int)(len - hlen);
    408 	if (len + align > MHLEN) {
    409 		m_clget(m, M_WAIT);
    410 		if ((m->m_flags & M_EXT) == 0) {
    411 			error = ENOBUFS;
    412 			goto bad;
    413 		}
    414 	}
    415 
    416 	/* Ensure the data is properly aligned */
    417 	if (align > 0) {
    418 		m->m_data += align;
    419 		m->m_len -= (int)align;
    420 	}
    421 
    422 	error = uiomove(mtod(m, void *), len, uio);
    423 	if (error)
    424 		goto bad;
    425 	if (hlen != 0) {
    426 		memcpy(sockp->sa_data, mtod(m, void *), hlen);
    427 		m->m_data += hlen; /* XXX */
    428 		len -= hlen;
    429 	}
    430 	m->m_len = (int)len;
    431 	*mp = m;
    432 	return (0);
    433 
    434 bad:
    435 	m_freem(m);
    436 	return (error);
    437 }
    438 
    439 /*
    440  * Attach file to the bpf interface, i.e. make d listen on bp.
    441  */
    442 static void
    443 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
    444 {
    445 
    446 	KASSERT(mutex_owned(&bpf_mtx));
    447 	KASSERT(mutex_owned(d->bd_mtx));
    448 	/*
    449 	 * Point d at bp, and add d to the interface's list of listeners.
    450 	 * Finally, point the driver's bpf cookie at the interface so
    451 	 * it will divert packets to bpf.
    452 	 */
    453 	d->bd_bif = bp;
    454 	BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d);
    455 
    456 	*bp->bif_driverp = bp;
    457 }
    458 
    459 /*
    460  * Detach a file from its interface.
    461  */
    462 static void
    463 bpf_detachd(struct bpf_d *d)
    464 {
    465 	struct bpf_if *bp;
    466 
    467 	KASSERT(mutex_owned(&bpf_mtx));
    468 	KASSERT(mutex_owned(d->bd_mtx));
    469 
    470 	bp = d->bd_bif;
    471 	/*
    472 	 * Check if this descriptor had requested promiscuous mode.
    473 	 * If so, turn it off.
    474 	 */
    475 	if (d->bd_promisc) {
    476 		int error __diagused;
    477 
    478 		d->bd_promisc = 0;
    479 		/*
    480 		 * Take device out of promiscuous mode.  Since we were
    481 		 * able to enter promiscuous mode, we should be able
    482 		 * to turn it off.  But we can get an error if
    483 		 * the interface was configured down, so don't panic;
    484 		 * just report an unexpected error under DIAGNOSTIC.
    485 		 */
    486 		KERNEL_LOCK_UNLESS_NET_MPSAFE();
    487   		error = ifpromisc(bp->bif_ifp, 0);
    488 		KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
    489 #ifdef DIAGNOSTIC
    490 		if (error)
    491 			printf("%s: ifpromisc failed: %d", __func__, error);
    492 #endif
    493 	}
    494 
    495 	/* Remove d from the interface's descriptor list. */
    496 	BPFIF_DLIST_WRITER_REMOVE(d);
    497 
    498 	pserialize_perform(bpf_psz);
    499 
    500 	if (BPFIF_DLIST_WRITER_EMPTY(bp)) {
    501 		/*
    502 		 * Let the driver know that there are no more listeners.
    503 		 */
    504 		*d->bd_bif->bif_driverp = NULL;
    505 	}
    506 	d->bd_bif = NULL;
    507 }
    508 
    509 static void
    510 bpf_init(void)
    511 {
    512 
    513 	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
    514 	bpf_psz = pserialize_create();
    515 	bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET);
    516 
    517 	PSLIST_INIT(&bpf_iflist);
    518 	PSLIST_INIT(&bpf_dlist);
    519 
    520 	bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat));
    521 
    522 	return;
    523 }
    524 
    525 /*
    526  * bpfilterattach() is called at boot time.  We don't need to do anything
    527  * here, since any initialization will happen as part of module init code.
    528  */
    529 /* ARGSUSED */
    530 void
    531 bpfilterattach(int n)
    532 {
    533 
    534 }
    535 
    536 /*
    537  * Open the bpf device.  Each open clones a new descriptor.
    538  */
    539 /* ARGSUSED */
    540 int
    541 bpfopen(dev_t dev, int flag, int mode, struct lwp *l)
    542 {
    543 	struct bpf_d *d;
    544 	struct file *fp;
    545 	int error, fd;
    546 
    547 	/* falloc() will fill in the descriptor for us. */
    548 	if ((error = fd_allocfile(&fp, &fd)) != 0)
    549 		return error;
    550 
    551 	d = kmem_zalloc(sizeof(*d), KM_SLEEP);
    552 	d->bd_bufsize = bpf_bufsize;
    553 	d->bd_seesent = 1;
    554 	d->bd_feedback = 0;
    555 	d->bd_pid = l->l_proc->p_pid;
    556 #ifdef _LP64
    557 	if (curproc->p_flag & PK_32)
    558 		d->bd_compat32 = 1;
    559 #endif
    560 	getnanotime(&d->bd_btime);
    561 	d->bd_atime = d->bd_mtime = d->bd_btime;
    562 	callout_init(&d->bd_callout, CALLOUT_MPSAFE);
    563 	selinit(&d->bd_sel);
    564 	d->bd_jitcode = NULL;
    565 	d->bd_filter = NULL;
    566 	BPF_DLIST_ENTRY_INIT(d);
    567 	BPFIF_DLIST_ENTRY_INIT(d);
    568 	d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
    569 	d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    570 	cv_init(&d->bd_cv, "bpf");
    571 
    572 	mutex_enter(&bpf_mtx);
    573 	BPF_DLIST_WRITER_INSERT_HEAD(d);
    574 	mutex_exit(&bpf_mtx);
    575 
    576 	return fd_clone(fp, fd, flag, &bpf_fileops, d);
    577 }
    578 
    579 /*
    580  * Close the descriptor by detaching it from its interface,
    581  * deallocating its buffers, and marking it free.
    582  */
    583 /* ARGSUSED */
    584 static int
    585 bpf_close(struct file *fp)
    586 {
    587 	struct bpf_d *d;
    588 
    589 	mutex_enter(&bpf_mtx);
    590 
    591 	if ((d = fp->f_bpf) == NULL) {
    592 		mutex_exit(&bpf_mtx);
    593 		return 0;
    594 	}
    595 
    596 	/*
    597 	 * Refresh the PID associated with this bpf file.
    598 	 */
    599 	d->bd_pid = curproc->p_pid;
    600 
    601 	mutex_enter(d->bd_mtx);
    602 	if (d->bd_state == BPF_WAITING)
    603 		callout_halt(&d->bd_callout, d->bd_mtx);
    604 	d->bd_state = BPF_IDLE;
    605 	if (d->bd_bif)
    606 		bpf_detachd(d);
    607 	mutex_exit(d->bd_mtx);
    608 
    609 	BPF_DLIST_WRITER_REMOVE(d);
    610 
    611 	pserialize_perform(bpf_psz);
    612 	mutex_exit(&bpf_mtx);
    613 
    614 	BPFIF_DLIST_ENTRY_DESTROY(d);
    615 	BPF_DLIST_ENTRY_DESTROY(d);
    616 	fp->f_bpf = NULL;
    617 	bpf_freed(d);
    618 	callout_destroy(&d->bd_callout);
    619 	seldestroy(&d->bd_sel);
    620 	mutex_obj_free(d->bd_mtx);
    621 	mutex_obj_free(d->bd_buf_mtx);
    622 	cv_destroy(&d->bd_cv);
    623 
    624 	kmem_free(d, sizeof(*d));
    625 
    626 	return (0);
    627 }
    628 
    629 /*
    630  * Rotate the packet buffers in descriptor d.  Move the store buffer
    631  * into the hold slot, and the free buffer into the store slot.
    632  * Zero the length of the new store buffer.
    633  */
    634 #define ROTATE_BUFFERS(d) \
    635 	(d)->bd_hbuf = (d)->bd_sbuf; \
    636 	(d)->bd_hlen = (d)->bd_slen; \
    637 	(d)->bd_sbuf = (d)->bd_fbuf; \
    638 	(d)->bd_slen = 0; \
    639 	(d)->bd_fbuf = NULL;
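
        /*
         * ROTATE_BUFFERS() expands to five statements with no
         * do { ... } while (0) wrapper, so it is only safe where a braced
         * block is expected; used as the unbraced body of an if, only the
         * first assignment would be guarded.  A sketch of the intended
         * shape (the call sites below always hold bd_buf_mtx):
         *
         *	if (d->bd_fbuf != NULL) {
         *		ROTATE_BUFFERS(d);
         *	}
         */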
    640 /*
    641  *  bpf_read - read the next chunk of packets from the buffers
    642  */
    643 static int
    644 bpf_read(struct file *fp, off_t *offp, struct uio *uio,
    645     kauth_cred_t cred, int flags)
    646 {
    647 	struct bpf_d *d = fp->f_bpf;
    648 	int timed_out;
    649 	int error;
    650 
    651 	getnanotime(&d->bd_atime);
    652 	/*
    653 	 * Restrict application to use a buffer the same size as
    654 	 * the kernel buffers.
    655 	 */
    656 	if (uio->uio_resid != d->bd_bufsize)
    657 		return (EINVAL);
    658 
    659 	mutex_enter(d->bd_mtx);
    660 	if (d->bd_state == BPF_WAITING)
    661 		callout_halt(&d->bd_callout, d->bd_mtx);
    662 	timed_out = (d->bd_state == BPF_TIMED_OUT);
    663 	d->bd_state = BPF_IDLE;
    664 	mutex_exit(d->bd_mtx);
    665 	/*
    666 	 * If the hold buffer is empty, then do a timed sleep, which
    667 	 * ends when the timeout expires or when enough packets
    668 	 * have arrived to fill the store buffer.
    669 	 */
    670 	mutex_enter(d->bd_buf_mtx);
    671 	while (d->bd_hbuf == NULL) {
    672 		if (fp->f_flag & FNONBLOCK) {
    673 			if (d->bd_slen == 0) {
    674 				error = EWOULDBLOCK;
    675 				goto out;
    676 			}
    677 			ROTATE_BUFFERS(d);
    678 			break;
    679 		}
    680 
    681 		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
    682 			/*
    683 			 * A packet(s) either arrived since the previous
    684 			 * read or arrived while we were asleep.
    685 			 * Rotate the buffers and return what's here.
    686 			 */
    687 			ROTATE_BUFFERS(d);
    688 			break;
    689 		}
    690 
    691 		error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout);
    692 
    693 		if (error == EINTR || error == ERESTART)
    694 			goto out;
    695 
    696 		if (error == EWOULDBLOCK) {
    697 			/*
    698 			 * On a timeout, return what's in the buffer,
    699 			 * which may be nothing.  If there is something
    700 			 * in the store buffer, we can rotate the buffers.
    701 			 */
    702 			if (d->bd_hbuf)
    703 				/*
    704 				 * We filled up the buffer in between
    705 				 * getting the timeout and arriving
    706 				 * here, so we don't need to rotate.
    707 				 */
    708 				break;
    709 
    710 			if (d->bd_slen == 0) {
    711 				error = 0;
    712 				goto out;
    713 			}
    714 			ROTATE_BUFFERS(d);
    715 			break;
    716 		}
    717 		if (error != 0)
    718 			goto out;
    719 	}
    720 	/*
    721 	 * At this point, we know we have something in the hold slot.
    722 	 */
    723 	mutex_exit(d->bd_buf_mtx);
    724 
    725 	/*
    726 	 * Move data from hold buffer into user space.
    727 	 * We know the entire buffer is transferred since
    728 	 * we checked above that the read buffer is bpf_bufsize bytes.
    729 	 */
    730 	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);
    731 
    732 	mutex_enter(d->bd_buf_mtx);
    733 	d->bd_fbuf = d->bd_hbuf;
    734 	d->bd_hbuf = NULL;
    735 	d->bd_hlen = 0;
    736 out:
    737 	mutex_exit(d->bd_buf_mtx);
    738 	return (error);
    739 }
    740 
    741 
    742 /*
    743  * If there are processes sleeping on this descriptor, wake them up.
    744  */
    745 static inline void
    746 bpf_wakeup(struct bpf_d *d)
    747 {
    748 
    749 	mutex_enter(d->bd_buf_mtx);
    750 	cv_broadcast(&d->bd_cv);
    751 	mutex_exit(d->bd_buf_mtx);
    752 
    753 	if (d->bd_async)
    754 		fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL);
    755 	selnotify(&d->bd_sel, 0, 0);
    756 }
    757 
    758 static void
    759 bpf_timed_out(void *arg)
    760 {
    761 	struct bpf_d *d = arg;
    762 
    763 	mutex_enter(d->bd_mtx);
    764 	if (d->bd_state == BPF_WAITING) {
    765 		d->bd_state = BPF_TIMED_OUT;
    766 		if (d->bd_slen != 0)
    767 			bpf_wakeup(d);
    768 	}
    769 	mutex_exit(d->bd_mtx);
    770 }
    771 
    772 
    773 static int
    774 bpf_write(struct file *fp, off_t *offp, struct uio *uio,
    775     kauth_cred_t cred, int flags)
    776 {
    777 	struct bpf_d *d = fp->f_bpf;
    778 	struct bpf_if *bp;
    779 	struct ifnet *ifp;
    780 	struct mbuf *m, *mc;
    781 	int error;
    782 	static struct sockaddr_storage dst;
    783 	struct psref psref;
    784 	int bound;
    785 
    786 	m = NULL;	/* XXX gcc */
    787 
    788 	bound = curlwp_bind();
    789 	mutex_enter(d->bd_mtx);
    790 	bp = d->bd_bif;
    791 	if (bp == NULL) {
    792 		mutex_exit(d->bd_mtx);
    793 		error = ENXIO;
    794 		goto out_bindx;
    795 	}
    796 	bpf_if_acquire(bp, &psref);
    797 	mutex_exit(d->bd_mtx);
    798 
    799 	getnanotime(&d->bd_mtime);
    800 
    801 	ifp = bp->bif_ifp;
    802 	if (if_is_deactivated(ifp)) {
    803 		error = ENXIO;
    804 		goto out;
    805 	}
    806 
    807 	if (uio->uio_resid == 0) {
    808 		error = 0;
    809 		goto out;
    810 	}
    811 
    812 	error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m,
    813 		(struct sockaddr *) &dst);
    814 	if (error)
    815 		goto out;
    816 
    817 	if (m->m_pkthdr.len > ifp->if_mtu) {
    818 		m_freem(m);
    819 		error = EMSGSIZE;
    820 		goto out;
    821 	}
    822 
    823 	if (d->bd_hdrcmplt)
    824 		dst.ss_family = pseudo_AF_HDRCMPLT;
    825 
    826 	if (d->bd_feedback) {
    827 		mc = m_dup(m, 0, M_COPYALL, M_NOWAIT);
    828 		if (mc != NULL)
    829 			m_set_rcvif(mc, ifp);
    830 		/* Set M_PROMISC for outgoing packets to be discarded. */
    831 		if (1 /*d->bd_direction == BPF_D_INOUT*/)
    832 			m->m_flags |= M_PROMISC;
    833 	} else
    834 		mc = NULL;
    835 
    836 	error = if_output_lock(ifp, ifp, m, (struct sockaddr *) &dst, NULL);
    837 
    838 	if (mc != NULL) {
    839 		if (error == 0)
    840 			ifp->_if_input(ifp, mc);
    841 		else
    842 			m_freem(mc);
    843 	}
    844 	/*
    845 	 * The driver frees the mbuf.
    846 	 */
    847 out:
    848 	bpf_if_release(bp, &psref);
    849 out_bindx:
    850 	curlwp_bindx(bound);
    851 	return error;
    852 }
    853 
    854 /*
    855  * Reset a descriptor by flushing its packet buffer and clearing the
    856  * receive and drop counts.
    857  */
    858 static void
    859 reset_d(struct bpf_d *d)
    860 {
    861 
    862 	KASSERT(mutex_owned(d->bd_mtx));
    863 
    864 	mutex_enter(d->bd_buf_mtx);
    865 	if (d->bd_hbuf) {
    866 		/* Free the hold buffer. */
    867 		d->bd_fbuf = d->bd_hbuf;
    868 		d->bd_hbuf = NULL;
    869 	}
    870 	d->bd_slen = 0;
    871 	d->bd_hlen = 0;
    872 	d->bd_rcount = 0;
    873 	d->bd_dcount = 0;
    874 	d->bd_ccount = 0;
    875 	mutex_exit(d->bd_buf_mtx);
    876 }
    877 
    878 /*
    879  *  FIONREAD		Check for read packet available.
    880  *  BIOCGBLEN		Get buffer len [for read()].
    881  *  BIOCSETF		Set link layer read filter.
    882  *  BIOCFLUSH		Flush read packet buffer.
    883  *  BIOCPROMISC		Put interface into promiscuous mode.
    884  *  BIOCGDLT		Get link layer type.
    885  *  BIOCGETIF		Get interface name.
    886  *  BIOCSETIF		Set interface.
    887  *  BIOCSRTIMEOUT	Set read timeout.
    888  *  BIOCGRTIMEOUT	Get read timeout.
    889  *  BIOCGSTATS		Get packet stats.
    890  *  BIOCIMMEDIATE	Set immediate mode.
    891  *  BIOCVERSION		Get filter language version.
    892  *  BIOCGHDRCMPLT	Get "header already complete" flag.
    893  *  BIOCSHDRCMPLT	Set "header already complete" flag.
    894  *  BIOCSFEEDBACK	Set packet feedback mode.
    895  *  BIOCGFEEDBACK	Get packet feedback mode.
    896  *  BIOCGSEESENT  	Get "see sent packets" mode.
    897  *  BIOCSSEESENT  	Set "see sent packets" mode.
    898  */
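        /*
         * A minimal userland sketch of driving these ioctls (illustrative
         * only: error handling is omitted, the interface name "wm0" is a
         * placeholder, and the clone device path may vary).  BIOCGBLEN is
         * queried because bpf_read() below insists that read() ask for
         * exactly the kernel buffer size:
         *
         *	int fd = open("/dev/bpf", O_RDWR);
         *	u_int blen, imm = 1;
         *	struct ifreq ifr;
         *
         *	strlcpy(ifr.ifr_name, "wm0", sizeof(ifr.ifr_name));
         *	ioctl(fd, BIOCSETIF, &ifr);
         *	ioctl(fd, BIOCIMMEDIATE, &imm);
         *	ioctl(fd, BIOCGBLEN, &blen);
         *	char *buf = malloc(blen);
         *	ssize_t n = read(fd, buf, blen);
         */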
    899 /* ARGSUSED */
    900 static int
    901 bpf_ioctl(struct file *fp, u_long cmd, void *addr)
    902 {
    903 	struct bpf_d *d = fp->f_bpf;
    904 	int error = 0;
    905 
    906 	/*
    907 	 * Refresh the PID associated with this bpf file.
    908 	 */
    909 	d->bd_pid = curproc->p_pid;
    910 #ifdef _LP64
    911 	if (curproc->p_flag & PK_32)
    912 		d->bd_compat32 = 1;
    913 	else
    914 		d->bd_compat32 = 0;
    915 #endif
    916 
    917 	mutex_enter(d->bd_mtx);
    918 	if (d->bd_state == BPF_WAITING)
    919 		callout_halt(&d->bd_callout, d->bd_mtx);
    920 	d->bd_state = BPF_IDLE;
    921 	mutex_exit(d->bd_mtx);
    922 
    923 	switch (cmd) {
    924 
    925 	default:
    926 		error = EINVAL;
    927 		break;
    928 
    929 	/*
    930 	 * Check for read packet available.
    931 	 */
    932 	case FIONREAD:
    933 		{
    934 			int n;
    935 
    936 			mutex_enter(d->bd_buf_mtx);
    937 			n = d->bd_slen;
    938 			if (d->bd_hbuf)
    939 				n += d->bd_hlen;
    940 			mutex_exit(d->bd_buf_mtx);
    941 
    942 			*(int *)addr = n;
    943 			break;
    944 		}
    945 
    946 	/*
    947 	 * Get buffer len [for read()].
    948 	 */
    949 	case BIOCGBLEN:
    950 		*(u_int *)addr = d->bd_bufsize;
    951 		break;
    952 
    953 	/*
    954 	 * Set buffer length.
    955 	 */
    956 	case BIOCSBLEN:
    957 		/*
    958 		 * Forbid to change the buffer length if buffers are already
    959 		 * allocated.
    960 		 */
    961 		mutex_enter(d->bd_mtx);
    962 		mutex_enter(d->bd_buf_mtx);
    963 		if (d->bd_bif != NULL || d->bd_sbuf != NULL)
    964 			error = EINVAL;
    965 		else {
    966 			u_int size = *(u_int *)addr;
    967 
    968 			if (size > bpf_maxbufsize)
    969 				*(u_int *)addr = size = bpf_maxbufsize;
    970 			else if (size < BPF_MINBUFSIZE)
    971 				*(u_int *)addr = size = BPF_MINBUFSIZE;
    972 			d->bd_bufsize = size;
    973 		}
    974 		mutex_exit(d->bd_buf_mtx);
    975 		mutex_exit(d->bd_mtx);
    976 		break;
    977 
    978 	/*
    979 	 * Set link layer read filter.
    980 	 */
    981 	case BIOCSETF:
    982 		error = bpf_setf(d, addr);
    983 		break;
    984 
    985 	/*
    986 	 * Flush read packet buffer.
    987 	 */
    988 	case BIOCFLUSH:
    989 		mutex_enter(d->bd_mtx);
    990 		reset_d(d);
    991 		mutex_exit(d->bd_mtx);
    992 		break;
    993 
    994 	/*
    995 	 * Put interface into promiscuous mode.
    996 	 */
    997 	case BIOCPROMISC:
    998 		mutex_enter(d->bd_mtx);
    999 		if (d->bd_bif == NULL) {
   1000 			mutex_exit(d->bd_mtx);
   1001 			/*
   1002 			 * No interface attached yet.
   1003 			 */
   1004 			error = EINVAL;
   1005 			break;
   1006 		}
   1007 		if (d->bd_promisc == 0) {
   1008 			KERNEL_LOCK_UNLESS_NET_MPSAFE();
   1009 			error = ifpromisc(d->bd_bif->bif_ifp, 1);
   1010 			KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
   1011 			if (error == 0)
   1012 				d->bd_promisc = 1;
   1013 		}
   1014 		mutex_exit(d->bd_mtx);
   1015 		break;
   1016 
   1017 	/*
   1018 	 * Get device parameters.
   1019 	 */
   1020 	case BIOCGDLT:
   1021 		mutex_enter(d->bd_mtx);
   1022 		if (d->bd_bif == NULL)
   1023 			error = EINVAL;
   1024 		else
   1025 			*(u_int *)addr = d->bd_bif->bif_dlt;
   1026 		mutex_exit(d->bd_mtx);
   1027 		break;
   1028 
   1029 	/*
   1030 	 * Get a list of supported device parameters.
   1031 	 */
   1032 	case BIOCGDLTLIST:
   1033 		mutex_enter(d->bd_mtx);
   1034 		if (d->bd_bif == NULL)
   1035 			error = EINVAL;
   1036 		else
   1037 			error = bpf_getdltlist(d, addr);
   1038 		mutex_exit(d->bd_mtx);
   1039 		break;
   1040 
   1041 	/*
   1042 	 * Set device parameters.
   1043 	 */
   1044 	case BIOCSDLT:
   1045 		mutex_enter(&bpf_mtx);
   1046 		mutex_enter(d->bd_mtx);
   1047 		if (d->bd_bif == NULL)
   1048 			error = EINVAL;
   1049 		else
   1050 			error = bpf_setdlt(d, *(u_int *)addr);
   1051 		mutex_exit(d->bd_mtx);
   1052 		mutex_exit(&bpf_mtx);
   1053 		break;
   1054 
   1055 	/*
   1056 	 * Get interface name.
   1057 	 */
   1058 #ifdef OBIOCGETIF
   1059 	case OBIOCGETIF:
   1060 #endif
   1061 	case BIOCGETIF:
   1062 		mutex_enter(d->bd_mtx);
   1063 		if (d->bd_bif == NULL)
   1064 			error = EINVAL;
   1065 		else
   1066 			bpf_ifname(d->bd_bif->bif_ifp, addr);
   1067 		mutex_exit(d->bd_mtx);
   1068 		break;
   1069 
   1070 	/*
   1071 	 * Set interface.
   1072 	 */
   1073 #ifdef OBIOCSETIF
   1074 	case OBIOCSETIF:
   1075 #endif
   1076 	case BIOCSETIF:
   1077 		mutex_enter(&bpf_mtx);
   1078 		error = bpf_setif(d, addr);
   1079 		mutex_exit(&bpf_mtx);
   1080 		break;
   1081 
   1082 	/*
   1083 	 * Set read timeout.
   1084 	 */
   1085 	case BIOCSRTIMEOUT:
   1086 		{
   1087 			struct timeval *tv = addr;
   1088 
   1089 			/* Compute number of ticks. */
   1090 			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
   1091 			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
   1092 				d->bd_rtout = 1;
   1093 			break;
   1094 		}
   1095 
   1096 #ifdef BIOCGORTIMEOUT
   1097 	/*
   1098 	 * Get read timeout.
   1099 	 */
   1100 	case BIOCGORTIMEOUT:
   1101 		{
   1102 			struct timeval50 *tv = addr;
   1103 
   1104 			tv->tv_sec = d->bd_rtout / hz;
   1105 			tv->tv_usec = (d->bd_rtout % hz) * tick;
   1106 			break;
   1107 		}
   1108 #endif
   1109 
   1110 #ifdef BIOCSORTIMEOUT
   1111 	/*
   1112 	 * Set read timeout.
   1113 	 */
   1114 	case BIOCSORTIMEOUT:
   1115 		{
   1116 			struct timeval50 *tv = addr;
   1117 
   1118 			/* Compute number of ticks. */
   1119 			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
   1120 			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
   1121 				d->bd_rtout = 1;
   1122 			break;
   1123 		}
   1124 #endif
   1125 
   1126 	/*
   1127 	 * Get read timeout.
   1128 	 */
   1129 	case BIOCGRTIMEOUT:
   1130 		{
   1131 			struct timeval *tv = addr;
   1132 
   1133 			tv->tv_sec = d->bd_rtout / hz;
   1134 			tv->tv_usec = (d->bd_rtout % hz) * tick;
   1135 			break;
   1136 		}
   1137 	/*
   1138 	 * Get packet stats.
   1139 	 */
   1140 	case BIOCGSTATS:
   1141 		{
   1142 			struct bpf_stat *bs = addr;
   1143 
   1144 			bs->bs_recv = d->bd_rcount;
   1145 			bs->bs_drop = d->bd_dcount;
   1146 			bs->bs_capt = d->bd_ccount;
   1147 			break;
   1148 		}
   1149 
   1150 	case BIOCGSTATSOLD:
   1151 		{
   1152 			struct bpf_stat_old *bs = addr;
   1153 
   1154 			bs->bs_recv = d->bd_rcount;
   1155 			bs->bs_drop = d->bd_dcount;
   1156 			break;
   1157 		}
   1158 
   1159 	/*
   1160 	 * Set immediate mode.
   1161 	 */
   1162 	case BIOCIMMEDIATE:
   1163 		d->bd_immediate = *(u_int *)addr;
   1164 		break;
   1165 
   1166 	case BIOCVERSION:
   1167 		{
   1168 			struct bpf_version *bv = addr;
   1169 
   1170 			bv->bv_major = BPF_MAJOR_VERSION;
   1171 			bv->bv_minor = BPF_MINOR_VERSION;
   1172 			break;
   1173 		}
   1174 
   1175 	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
   1176 		*(u_int *)addr = d->bd_hdrcmplt;
   1177 		break;
   1178 
   1179 	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
   1180 		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
   1181 		break;
   1182 
   1183 	/*
   1184 	 * Get "see sent packets" flag
   1185 	 */
   1186 	case BIOCGSEESENT:
   1187 		*(u_int *)addr = d->bd_seesent;
   1188 		break;
   1189 
   1190 	/*
   1191 	 * Set "see sent packets" flag
   1192 	 */
   1193 	case BIOCSSEESENT:
   1194 		d->bd_seesent = *(u_int *)addr;
   1195 		break;
   1196 
   1197 	/*
   1198 	 * Set "feed packets from bpf back to input" mode
   1199 	 */
   1200 	case BIOCSFEEDBACK:
   1201 		d->bd_feedback = *(u_int *)addr;
   1202 		break;
   1203 
   1204 	/*
   1205 	 * Get "feed packets from bpf back to input" mode
   1206 	 */
   1207 	case BIOCGFEEDBACK:
   1208 		*(u_int *)addr = d->bd_feedback;
   1209 		break;
   1210 
   1211 	case FIONBIO:		/* Non-blocking I/O */
   1212 		/*
   1213 		 * No need to do anything special: bpf_read() checks
   1214 		 * FNONBLOCK in fp->f_flag to decide whether or not to
   1215 		 * block the read.
   1216 		 */
   1217 		break;
   1218 
   1219 	case FIOASYNC:		/* Send signal on receive packets */
   1220 		mutex_enter(d->bd_mtx);
   1221 		d->bd_async = *(int *)addr;
   1222 		mutex_exit(d->bd_mtx);
   1223 		break;
   1224 
   1225 	case TIOCSPGRP:		/* Process or group to send signals to */
   1226 	case FIOSETOWN:
   1227 		error = fsetown(&d->bd_pgid, cmd, addr);
   1228 		break;
   1229 
   1230 	case TIOCGPGRP:
   1231 	case FIOGETOWN:
   1232 		error = fgetown(d->bd_pgid, cmd, addr);
   1233 		break;
   1234 	}
   1235 	return (error);
   1236 }
   1237 
   1238 /*
   1239  * Set d's packet filter program to fp.  If this file already has a filter,
   1240  * free it and replace it.  Returns EINVAL for bogus requests.
   1241  */
   1242 static int
   1243 bpf_setf(struct bpf_d *d, struct bpf_program *fp)
   1244 {
   1245 	struct bpf_insn *fcode;
   1246 	bpfjit_func_t jcode;
   1247 	size_t flen, size = 0;
   1248 	struct bpf_filter *oldf, *newf;
   1249 
   1250 	jcode = NULL;
   1251 	flen = fp->bf_len;
   1252 
   1253 	if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) {
   1254 		return EINVAL;
   1255 	}
   1256 
   1257 	if (flen) {
   1258 		/*
   1259 		 * Allocate the buffer, copy the byte-code from
   1260 		 * userspace and validate it.
   1261 		 */
   1262 		size = flen * sizeof(*fp->bf_insns);
   1263 		fcode = kmem_alloc(size, KM_SLEEP);
   1264 		if (copyin(fp->bf_insns, fcode, size) != 0 ||
   1265 		    !bpf_validate(fcode, (int)flen)) {
   1266 			kmem_free(fcode, size);
   1267 			return EINVAL;
   1268 		}
   1269 		membar_consumer();
   1270 		if (bpf_jit)
   1271 			jcode = bpf_jit_generate(NULL, fcode, flen);
   1272 	} else {
   1273 		fcode = NULL;
   1274 	}
   1275 
   1276 	newf = kmem_alloc(sizeof(*newf), KM_SLEEP);
   1277 	newf->bf_insn = fcode;
   1278 	newf->bf_size = size;
   1279 	newf->bf_jitcode = jcode;
   1280 	d->bd_jitcode = jcode; /* XXX just for kvm(3) users */
   1281 
   1282 	/* Need to hold bpf_mtx for pserialize_perform */
   1283 	mutex_enter(&bpf_mtx);
   1284 	mutex_enter(d->bd_mtx);
   1285 	oldf = d->bd_filter;
   1286 	d->bd_filter = newf;
   1287 	membar_producer();
   1288 	reset_d(d);
   1289 	pserialize_perform(bpf_psz);
   1290 	mutex_exit(d->bd_mtx);
   1291 	mutex_exit(&bpf_mtx);
   1292 
   1293 	if (oldf != NULL)
   1294 		bpf_free_filter(oldf);
   1295 
   1296 	return 0;
   1297 }
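
        /*
         * Reader-side sketch of how a filter installed above is picked up
         * by the tap path (illustrative only; it mirrors bpf_deliver()
         * below): bd_filter is loaded without bd_mtx, the dependent loads
         * are ordered with membar_datadep_consumer(), and the
         * pserialize_perform() above guarantees that no such reader still
         * holds oldf by the time bpf_free_filter(oldf) runs.
         *
         *	filter = d->bd_filter;
         *	membar_datadep_consumer();
         *	if (filter != NULL)
         *		slen = bpf_filter_ext(NULL, filter->bf_insn, &args);
         */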
   1298 
   1299 /*
   1300  * Detach a file from its current interface (if attached at all) and attach
   1301  * to the interface indicated by the name stored in ifr.
   1302  * Return an errno or 0.
   1303  */
   1304 static int
   1305 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
   1306 {
   1307 	struct bpf_if *bp;
   1308 	char *cp;
   1309 	int unit_seen, i, error;
   1310 
   1311 	KASSERT(mutex_owned(&bpf_mtx));
   1312 	/*
   1313 	 * Make sure the provided name has a unit number, and default
   1314 	 * it to '0' if not specified.
   1315 	 * XXX This is ugly ... do this differently?
   1316 	 */
   1317 	unit_seen = 0;
   1318 	cp = ifr->ifr_name;
   1319 	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
   1320 	while (*cp++)
   1321 		if (*cp >= '0' && *cp <= '9')
   1322 			unit_seen = 1;
   1323 	if (!unit_seen) {
   1324 		/* Make sure to leave room for the '\0'. */
   1325 		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
   1326 			if ((ifr->ifr_name[i] >= 'a' &&
   1327 			     ifr->ifr_name[i] <= 'z') ||
   1328 			    (ifr->ifr_name[i] >= 'A' &&
   1329 			     ifr->ifr_name[i] <= 'Z'))
   1330 				continue;
   1331 			ifr->ifr_name[i] = '0';
   1332 		}
   1333 	}
   1334 
   1335 	/*
   1336 	 * Look through attached interfaces for the named one.
   1337 	 */
   1338 	BPF_IFLIST_WRITER_FOREACH(bp) {
   1339 		struct ifnet *ifp = bp->bif_ifp;
   1340 
   1341 		if (ifp == NULL ||
   1342 		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
   1343 			continue;
   1344 		/* skip additional entry */
   1345 		if (bp->bif_driverp != &ifp->if_bpf)
   1346 			continue;
   1347 		/*
   1348 		 * We found the requested interface.
   1349 		 * Allocate the packet buffers if we need to.
   1350 		 * If we're already attached to requested interface,
   1351 		 * just flush the buffer.
   1352 		 */
   1353 		/*
   1354 		 * bpf_allocbufs is called only here. bpf_mtx ensures that
   1355 		 * no race condition happen on d->bd_sbuf.
   1356 		 */
   1357 		if (d->bd_sbuf == NULL) {
   1358 			error = bpf_allocbufs(d);
   1359 			if (error != 0)
   1360 				return (error);
   1361 		}
   1362 		mutex_enter(d->bd_mtx);
   1363 		if (bp != d->bd_bif) {
   1364 			if (d->bd_bif) {
   1365 				/*
   1366 				 * Detach if attached to something else.
   1367 				 */
   1368 				bpf_detachd(d);
   1369 				BPFIF_DLIST_ENTRY_INIT(d);
   1370 			}
   1371 
   1372 			bpf_attachd(d, bp);
   1373 		}
   1374 		reset_d(d);
   1375 		mutex_exit(d->bd_mtx);
   1376 		return (0);
   1377 	}
   1378 	/* Not found. */
   1379 	return (ENXIO);
   1380 }
   1381 
   1382 /*
   1383  * Copy the interface name to the ifreq.
   1384  */
   1385 static void
   1386 bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
   1387 {
   1388 	memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
   1389 }
   1390 
   1391 static int
   1392 bpf_stat(struct file *fp, struct stat *st)
   1393 {
   1394 	struct bpf_d *d = fp->f_bpf;
   1395 
   1396 	(void)memset(st, 0, sizeof(*st));
   1397 	mutex_enter(d->bd_mtx);
   1398 	st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid);
   1399 	st->st_atimespec = d->bd_atime;
   1400 	st->st_mtimespec = d->bd_mtime;
   1401 	st->st_ctimespec = st->st_birthtimespec = d->bd_btime;
   1402 	st->st_uid = kauth_cred_geteuid(fp->f_cred);
   1403 	st->st_gid = kauth_cred_getegid(fp->f_cred);
   1404 	st->st_mode = S_IFCHR;
   1405 	mutex_exit(d->bd_mtx);
   1406 	return 0;
   1407 }
   1408 
   1409 /*
   1410  * Support for poll() system call
   1411  *
   1412  * Return true iff the specific operation will not block indefinitely - with
   1413  * the assumption that it is safe to positively acknowledge a request for the
   1414  * ability to write to the BPF device.
   1415  * Otherwise, return false but make a note that a selnotify() must be done.
   1416  */
   1417 static int
   1418 bpf_poll(struct file *fp, int events)
   1419 {
   1420 	struct bpf_d *d = fp->f_bpf;
   1421 	int revents;
   1422 
   1423 	/*
   1424 	 * Refresh the PID associated with this bpf file.
   1425 	 */
   1426 	mutex_enter(&bpf_mtx);
   1427 	d->bd_pid = curproc->p_pid;
   1428 
   1429 	revents = events & (POLLOUT | POLLWRNORM);
   1430 	if (events & (POLLIN | POLLRDNORM)) {
   1431 		/*
   1432 		 * An imitation of the FIONREAD ioctl code.
   1433 		 */
   1434 		mutex_enter(d->bd_mtx);
   1435 		if (d->bd_hlen != 0 ||
   1436 		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
   1437 		     d->bd_slen != 0)) {
   1438 			revents |= events & (POLLIN | POLLRDNORM);
   1439 		} else {
   1440 			selrecord(curlwp, &d->bd_sel);
   1441 			/* Start the read timeout if necessary */
   1442 			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
   1443 				callout_reset(&d->bd_callout, d->bd_rtout,
   1444 					      bpf_timed_out, d);
   1445 				d->bd_state = BPF_WAITING;
   1446 			}
   1447 		}
   1448 		mutex_exit(d->bd_mtx);
   1449 	}
   1450 
   1451 	mutex_exit(&bpf_mtx);
   1452 	return (revents);
   1453 }
   1454 
   1455 static void
   1456 filt_bpfrdetach(struct knote *kn)
   1457 {
   1458 	struct bpf_d *d = kn->kn_hook;
   1459 
   1460 	mutex_enter(d->bd_buf_mtx);
   1461 	SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
   1462 	mutex_exit(d->bd_buf_mtx);
   1463 }
   1464 
   1465 static int
   1466 filt_bpfread(struct knote *kn, long hint)
   1467 {
   1468 	struct bpf_d *d = kn->kn_hook;
   1469 	int rv;
   1470 
   1471 	mutex_enter(d->bd_buf_mtx);
   1472 	kn->kn_data = d->bd_hlen;
   1473 	if (d->bd_immediate)
   1474 		kn->kn_data += d->bd_slen;
   1475 	rv = (kn->kn_data > 0);
   1476 	mutex_exit(d->bd_buf_mtx);
   1477 	return rv;
   1478 }
   1479 
   1480 static const struct filterops bpfread_filtops =
   1481 	{ 1, NULL, filt_bpfrdetach, filt_bpfread };
   1482 
   1483 static int
   1484 bpf_kqfilter(struct file *fp, struct knote *kn)
   1485 {
   1486 	struct bpf_d *d = fp->f_bpf;
   1487 	struct klist *klist;
   1488 
   1489 	mutex_enter(d->bd_buf_mtx);
   1490 	switch (kn->kn_filter) {
   1491 	case EVFILT_READ:
   1492 		klist = &d->bd_sel.sel_klist;
   1493 		kn->kn_fop = &bpfread_filtops;
   1494 		break;
   1495 
   1496 	default:
   1497 		mutex_exit(d->bd_buf_mtx);
   1498 		return (EINVAL);
   1499 	}
   1500 
   1501 	kn->kn_hook = d;
   1502 
   1503 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
   1504 	mutex_exit(d->bd_buf_mtx);
   1505 
   1506 	return (0);
   1507 }
   1508 
   1509 /*
   1510  * Copy data from an mbuf chain into a buffer.  This code is derived
   1511  * from m_copydata in sys/uipc_mbuf.c.
   1512  */
   1513 static void *
   1514 bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
   1515 {
   1516 	const struct mbuf *m;
   1517 	u_int count;
   1518 	u_char *dst;
   1519 
   1520 	m = src_arg;
   1521 	dst = dst_arg;
   1522 	while (len > 0) {
   1523 		if (m == NULL)
   1524 			panic("bpf_mcpy");
   1525 		count = min(m->m_len, len);
   1526 		memcpy(dst, mtod(m, const void *), count);
   1527 		m = m->m_next;
   1528 		dst += count;
   1529 		len -= count;
   1530 	}
   1531 	return dst_arg;
   1532 }
   1533 
   1534 /*
   1535  * Dispatch a packet to all the listeners on interface bp.
   1536  *
   1537  * pkt     pointer to the packet, either a data buffer or an mbuf chain
   1538  * buflen  buffer length, if pkt is a data buffer
   1539  * cpfn    a function that can copy pkt into the listener's buffer
   1540  * pktlen  length of the packet
   1541  * rcv     true if packet came in
   1542  */
   1543 static inline void
   1544 bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
   1545     void *pkt, u_int pktlen, u_int buflen, const bool rcv)
   1546 {
   1547 	uint32_t mem[BPF_MEMWORDS];
   1548 	bpf_args_t args = {
   1549 		.pkt = (const uint8_t *)pkt,
   1550 		.wirelen = pktlen,
   1551 		.buflen = buflen,
   1552 		.mem = mem,
   1553 		.arg = NULL
   1554 	};
   1555 	bool gottime = false;
   1556 	struct timespec ts;
   1557 	struct bpf_d *d;
   1558 	int s;
   1559 
   1560 	KASSERT(!cpu_intr_p());
   1561 
   1562 	/*
   1563 	 * Note that the IPL does not have to be raised at this point.
   1564 	 * The only problem that could arise here is that if two different
   1565 	 * interfaces shared any data.  This is not the case.
   1566 	 */
   1567 	s = pserialize_read_enter();
   1568 	BPFIF_DLIST_READER_FOREACH(d, bp) {
   1569 		u_int slen = 0;
   1570 		struct bpf_filter *filter;
   1571 
   1572 		if (!d->bd_seesent && !rcv) {
   1573 			continue;
   1574 		}
   1575 		atomic_inc_ulong(&d->bd_rcount);
   1576 		BPF_STATINC(recv);
   1577 
   1578 		filter = d->bd_filter;
   1579 		membar_datadep_consumer();
   1580 		if (filter != NULL) {
   1581 			if (filter->bf_jitcode != NULL)
   1582 				slen = filter->bf_jitcode(NULL, &args);
   1583 			else
   1584 				slen = bpf_filter_ext(NULL, filter->bf_insn,
   1585 				    &args);
   1586 		}
   1587 
   1588 		if (!slen) {
   1589 			continue;
   1590 		}
   1591 		if (!gottime) {
   1592 			gottime = true;
   1593 			nanotime(&ts);
   1594 		}
   1595 		/* Assume catchpacket doesn't sleep */
   1596 		catchpacket(d, pkt, pktlen, slen, cpfn, &ts);
   1597 	}
   1598 	pserialize_read_exit(s);
   1599 }
   1600 
   1601 /*
   1602  * Incoming linkage from device drivers.  Process the packet pkt, of length
   1603  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
   1604  * by each process' filter, and if accepted, stashed into the corresponding
   1605  * buffer.
   1606  */
   1607 static void
   1608 _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
   1609 {
   1610 
   1611 	bpf_deliver(bp, memcpy, pkt, pktlen, pktlen, true);
   1612 }
   1613 
   1614 /*
   1615  * Incoming linkage from device drivers, when the head of the packet is in
   1616  * a buffer, and the tail is in an mbuf chain.
   1617  */
   1618 static void
   1619 _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
   1620 {
   1621 	u_int pktlen;
   1622 	struct mbuf mb;
   1623 
   1624 	/* Skip outgoing duplicate packets. */
   1625 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
   1626 		m->m_flags &= ~M_PROMISC;
   1627 		return;
   1628 	}
   1629 
   1630 	pktlen = m_length(m) + dlen;
   1631 
   1632 	/*
   1633 	 * Craft on-stack mbuf suitable for passing to bpf_filter.
   1634 	 * Note that we cut corners here; we only setup what's
   1635 	 * absolutely needed--this mbuf should never go anywhere else.
   1636 	 */
   1637 	(void)memset(&mb, 0, sizeof(mb));
   1638 	mb.m_next = m;
   1639 	mb.m_data = data;
   1640 	mb.m_len = dlen;
   1641 
   1642 	bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif_index != 0);
   1643 }
   1644 
   1645 /*
   1646  * Incoming linkage from device drivers, when packet is in an mbuf chain.
   1647  */
   1648 static void
   1649 _bpf_mtap(struct bpf_if *bp, struct mbuf *m)
   1650 {
   1651 	void *(*cpfn)(void *, const void *, size_t);
   1652 	u_int pktlen, buflen;
   1653 	void *marg;
   1654 
   1655 	/* Skip outgoing duplicate packets. */
   1656 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
   1657 		m->m_flags &= ~M_PROMISC;
   1658 		return;
   1659 	}
   1660 
   1661 	pktlen = m_length(m);
   1662 
   1663 	if (pktlen == m->m_len) {
   1664 		cpfn = (void *)memcpy;
   1665 		marg = mtod(m, void *);
   1666 		buflen = pktlen;
   1667 	} else {
   1668 		cpfn = bpf_mcpy;
   1669 		marg = m;
   1670 		buflen = 0;
   1671 	}
   1672 
   1673 	bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif_index != 0);
   1674 }
   1675 
   1676 /*
   1677  * We need to prepend the address family as
   1678  * a four byte field.  Cons up a dummy header
   1679  * to pacify bpf.  This is safe because bpf
   1680  * will only read from the mbuf (i.e., it won't
   1681  * try to free it or keep a pointer a to it).
   1682  */
   1683 static void
   1684 _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m)
   1685 {
   1686 	struct mbuf m0;
   1687 
   1688 	m0.m_flags = 0;
   1689 	m0.m_next = m;
   1690 	m0.m_len = 4;
   1691 	m0.m_data = (char *)&af;
   1692 
   1693 	_bpf_mtap(bp, &m0);
   1694 }
   1695 
   1696 /*
   1697  * Put the SLIP pseudo-"link header" in place.
   1698  * Note this M_PREPEND() should never fail,
   1699  * since we know we always have enough space
   1700  * in the input buffer.
   1701  */
   1702 static void
   1703 _bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m)
   1704 {
   1705 	u_char *hp;
   1706 
   1707 	M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
   1708 	if (*m == NULL)
   1709 		return;
   1710 
   1711 	hp = mtod(*m, u_char *);
   1712 	hp[SLX_DIR] = SLIPDIR_IN;
   1713 	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
   1714 
   1715 	_bpf_mtap(bp, *m);
   1716 
   1717 	m_adj(*m, SLIP_HDRLEN);
   1718 }
   1719 
   1720 /*
   1721  * Put the SLIP pseudo-"link header" in
   1722  * place.  The compressed header is now
   1723  * at the beginning of the mbuf.
   1724  */
   1725 static void
   1726 _bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m)
   1727 {
   1728 	struct mbuf m0;
   1729 	u_char *hp;
   1730 
   1731 	m0.m_flags = 0;
   1732 	m0.m_next = m;
   1733 	m0.m_data = m0.m_dat;
   1734 	m0.m_len = SLIP_HDRLEN;
   1735 
   1736 	hp = mtod(&m0, u_char *);
   1737 
   1738 	hp[SLX_DIR] = SLIPDIR_OUT;
   1739 	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
   1740 
   1741 	_bpf_mtap(bp, &m0);
   1742 	m_freem(m);
   1743 }
   1744 
   1745 static struct mbuf *
   1746 bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m)
   1747 {
   1748 	struct mbuf *dup;
   1749 
   1750 	dup = m_dup(m, 0, M_COPYALL, M_NOWAIT);
   1751 	if (dup == NULL)
   1752 		return NULL;
   1753 
   1754 	if (bp->bif_mbuf_tail != NULL) {
   1755 		bp->bif_mbuf_tail->m_nextpkt = dup;
   1756 	} else {
   1757 		bp->bif_mbuf_head = dup;
   1758 	}
   1759 	bp->bif_mbuf_tail = dup;
   1760 #ifdef BPF_MTAP_SOFTINT_DEBUG
   1761 	log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n",
   1762 	    __func__, dup, bp->bif_ifp->if_xname);
   1763 #endif
   1764 
   1765 	return dup;
   1766 }
   1767 
   1768 static struct mbuf *
   1769 bpf_mbuf_dequeue(struct bpf_if *bp)
   1770 {
   1771 	struct mbuf *m;
   1772 	int s;
   1773 
   1774 	/* XXX NOMPSAFE: assumed running on one CPU */
   1775 	s = splnet();
   1776 	m = bp->bif_mbuf_head;
   1777 	if (m != NULL) {
   1778 		bp->bif_mbuf_head = m->m_nextpkt;
   1779 		m->m_nextpkt = NULL;
   1780 
   1781 		if (bp->bif_mbuf_head == NULL)
   1782 			bp->bif_mbuf_tail = NULL;
   1783 #ifdef BPF_MTAP_SOFTINT_DEBUG
   1784 		log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n",
   1785 		    __func__, m, bp->bif_ifp->if_xname);
   1786 #endif
   1787 	}
   1788 	splx(s);
   1789 
   1790 	return m;
   1791 }
   1792 
   1793 static void
   1794 bpf_mtap_si(void *arg)
   1795 {
   1796 	struct bpf_if *bp = arg;
   1797 	struct mbuf *m;
   1798 
   1799 	while ((m = bpf_mbuf_dequeue(bp)) != NULL) {
   1800 #ifdef BPF_MTAP_SOFTINT_DEBUG
   1801 		log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n",
   1802 		    __func__, m, bp->bif_ifp->if_xname);
   1803 #endif
   1804 		bpf_ops->bpf_mtap(bp, m);
   1805 		m_freem(m);
   1806 	}
   1807 }
   1808 
   1809 static void
   1810 _bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m)
   1811 {
   1812 	struct bpf_if *bp = ifp->if_bpf;
   1813 	struct mbuf *dup;
   1814 
   1815 	KASSERT(cpu_intr_p());
   1816 
   1817 	/* To avoid extra invocations of the softint */
   1818 	if (BPFIF_DLIST_READER_EMPTY(bp))
   1819 		return;
   1820 	KASSERT(bp->bif_si != NULL);
   1821 
   1822 	dup = bpf_mbuf_enqueue(bp, m);
   1823 	if (dup != NULL)
   1824 		softint_schedule(bp->bif_si);
   1825 }
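
        /*
         * Hypothetical driver-side use (a sketch; the public wrapper name
         * and the softint registration step are not shown in this excerpt):
         * a driver whose receive path runs in hard interrupt context defers
         * the tap instead of calling bpf_mtap() directly, e.g.
         *
         *	if (ifp->if_bpf)
         *		bpf_mtap_softint(ifp, m);
         *
         * which queues a copy of the mbuf and schedules bif_si to perform
         * the real tap from softint context.
         */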
   1826 
   1827 static int
   1828 bpf_hdrlen(struct bpf_d *d)
   1829 {
   1830 	int hdrlen = d->bd_bif->bif_hdrlen;
   1831 	/*
   1832 	 * Compute the length of the bpf header.  This is not necessarily
   1833 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
   1834 	 * that the network layer header begins on a longword boundary (for
   1835 	 * performance reasons and to alleviate alignment restrictions).
   1836 	 */
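	/*
	 * In other words, the value returned is chosen so that
	 * "return value + hdrlen" equals BPF_WORDALIGN(hdrlen +
	 * SIZEOF_BPF_HDR); since every record starts on a longword
	 * boundary, the byte just past the link-layer header (i.e. the
	 * network-layer header) then lands on a longword boundary too.
	 */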
   1837 #ifdef _LP64
   1838 	if (d->bd_compat32)
   1839 		return (BPF_WORDALIGN32(hdrlen + SIZEOF_BPF_HDR32) - hdrlen);
   1840 	else
   1841 #endif
   1842 		return (BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen);
   1843 }
   1844 
   1845 /*
   1846  * Move the packet data from interface memory (pkt) into the
    1847  * store buffer.  Call the wakeup functions if it's time to wake up
    1848  * a listener (buffer full).  "cpfn" is the routine called to do the
   1849  * actual data transfer. memcpy is passed in to copy contiguous chunks,
   1850  * while bpf_mcpy is passed in to copy mbuf chains.  In the latter case,
   1851  * pkt is really an mbuf.
   1852  */
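/*
 * Record layout in the store buffer (sketch): each record is a bpf header
 * (including the alignment padding accounted for by bpf_hdrlen()) followed
 * by up to snaplen bytes of packet data, and the next record starts on the
 * next longword boundary:
 *
 *   +----------------+----------------------+--pad--+----------------+...
 *   | bpf_hdr + pad  | packet data (caplen) |       | next bpf_hdr   |
 *   +----------------+----------------------+--pad--+----------------+...
 */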
   1853 static void
   1854 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
   1855     void *(*cpfn)(void *, const void *, size_t), struct timespec *ts)
   1856 {
   1857 	char *h;
   1858 	int totlen, curlen, caplen;
   1859 	int hdrlen = bpf_hdrlen(d);
   1860 	int do_wakeup = 0;
   1861 
   1862 	atomic_inc_ulong(&d->bd_ccount);
   1863 	BPF_STATINC(capt);
   1864 	/*
   1865 	 * Figure out how many bytes to move.  If the packet is
   1866 	 * greater or equal to the snapshot length, transfer that
   1867 	 * much.  Otherwise, transfer the whole packet (unless
   1868 	 * we hit the buffer size limit).
   1869 	 */
   1870 	totlen = hdrlen + min(snaplen, pktlen);
   1871 	if (totlen > d->bd_bufsize)
   1872 		totlen = d->bd_bufsize;
   1873 	/*
   1874 	 * If we adjusted totlen to fit the bufsize, it could be that
   1875 	 * totlen is smaller than hdrlen because of the link layer header.
   1876 	 */
   1877 	caplen = totlen - hdrlen;
   1878 	if (caplen < 0)
   1879 		caplen = 0;
   1880 
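	/*
	 * The descriptor uses three buffers: bd_sbuf (store, being filled
	 * here), bd_hbuf (hold, being read by the user) and bd_fbuf (free).
	 * ROTATE_BUFFERS() below moves a full store buffer to the hold slot
	 * and the free buffer into the store slot.
	 */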
   1881 	mutex_enter(d->bd_buf_mtx);
   1882 	/*
   1883 	 * Round up the end of the previous packet to the next longword.
   1884 	 */
   1885 #ifdef _LP64
   1886 	if (d->bd_compat32)
   1887 		curlen = BPF_WORDALIGN32(d->bd_slen);
   1888 	else
   1889 #endif
   1890 		curlen = BPF_WORDALIGN(d->bd_slen);
   1891 	if (curlen + totlen > d->bd_bufsize) {
   1892 		/*
   1893 		 * This packet will overflow the storage buffer.
   1894 		 * Rotate the buffers if we can, then wakeup any
   1895 		 * pending reads.
   1896 		 */
   1897 		if (d->bd_fbuf == NULL) {
   1898 			mutex_exit(d->bd_buf_mtx);
   1899 			/*
   1900 			 * We haven't completed the previous read yet,
   1901 			 * so drop the packet.
   1902 			 */
   1903 			atomic_inc_ulong(&d->bd_dcount);
   1904 			BPF_STATINC(drop);
   1905 			return;
   1906 		}
   1907 		ROTATE_BUFFERS(d);
   1908 		do_wakeup = 1;
   1909 		curlen = 0;
   1910 	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
   1911 		/*
   1912 		 * Immediate mode is set, or the read timeout has
   1913 		 * already expired during a select call.  A packet
   1914 		 * arrived, so the reader should be woken up.
   1915 		 */
   1916 		do_wakeup = 1;
   1917 	}
   1918 
   1919 	/*
   1920 	 * Append the bpf header.
   1921 	 */
   1922 	h = (char *)d->bd_sbuf + curlen;
   1923 #ifdef _LP64
   1924 	if (d->bd_compat32) {
   1925 		struct bpf_hdr32 *hp32;
   1926 
   1927 		hp32 = (struct bpf_hdr32 *)h;
   1928 		hp32->bh_tstamp.tv_sec = ts->tv_sec;
   1929 		hp32->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
   1930 		hp32->bh_datalen = pktlen;
   1931 		hp32->bh_hdrlen = hdrlen;
   1932 		hp32->bh_caplen = caplen;
   1933 	} else
   1934 #endif
   1935 	{
   1936 		struct bpf_hdr *hp;
   1937 
   1938 		hp = (struct bpf_hdr *)h;
   1939 		hp->bh_tstamp.tv_sec = ts->tv_sec;
   1940 		hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
   1941 		hp->bh_datalen = pktlen;
   1942 		hp->bh_hdrlen = hdrlen;
   1943 		hp->bh_caplen = caplen;
   1944 	}
   1945 
   1946 	/*
   1947 	 * Copy the packet data into the store buffer and update its length.
   1948 	 */
   1949 	(*cpfn)(h + hdrlen, pkt, caplen);
   1950 	d->bd_slen = curlen + totlen;
   1951 	mutex_exit(d->bd_buf_mtx);
   1952 
   1953 	/*
   1954 	 * Call bpf_wakeup after bd_slen has been updated so that kevent(2)
    1955 	 * will cause filt_bpfread() to see the updated length.
   1956 	 */
   1957 	if (do_wakeup)
   1958 		bpf_wakeup(d);
   1959 }
   1960 
   1961 /*
    1962  * Allocate the capture buffers of a descriptor and reset their lengths.
   1963  */
   1964 static int
   1965 bpf_allocbufs(struct bpf_d *d)
   1966 {
   1967 
   1968 	d->bd_fbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP);
   1969 	if (!d->bd_fbuf)
   1970 		return (ENOBUFS);
   1971 	d->bd_sbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP);
   1972 	if (!d->bd_sbuf) {
   1973 		kmem_free(d->bd_fbuf, d->bd_bufsize);
   1974 		return (ENOBUFS);
   1975 	}
   1976 	d->bd_slen = 0;
   1977 	d->bd_hlen = 0;
   1978 	return (0);
   1979 }
   1980 
   1981 static void
   1982 bpf_free_filter(struct bpf_filter *filter)
   1983 {
   1984 
   1985 	KASSERT(filter != NULL);
   1986 	KASSERT(filter->bf_insn != NULL);
   1987 
   1988 	kmem_free(filter->bf_insn, filter->bf_size);
   1989 	if (filter->bf_jitcode != NULL)
   1990 		bpf_jit_freecode(filter->bf_jitcode);
   1991 	kmem_free(filter, sizeof(*filter));
   1992 }
   1993 
   1994 /*
   1995  * Free buffers currently in use by a descriptor.
   1996  * Called on close.
   1997  */
   1998 static void
   1999 bpf_freed(struct bpf_d *d)
   2000 {
   2001 	/*
   2002 	 * We don't need to lock out interrupts since this descriptor has
    2003 	 * been detached from its interface and it has not yet been marked
   2004 	 * free.
   2005 	 */
   2006 	if (d->bd_sbuf != NULL) {
   2007 		kmem_free(d->bd_sbuf, d->bd_bufsize);
   2008 		if (d->bd_hbuf != NULL)
   2009 			kmem_free(d->bd_hbuf, d->bd_bufsize);
   2010 		if (d->bd_fbuf != NULL)
   2011 			kmem_free(d->bd_fbuf, d->bd_bufsize);
   2012 	}
   2013 	if (d->bd_filter != NULL) {
   2014 		bpf_free_filter(d->bd_filter);
   2015 		d->bd_filter = NULL;
   2016 	}
   2017 	d->bd_jitcode = NULL;
   2018 }
   2019 
   2020 /*
   2021  * Attach an interface to bpf.  dlt is the link layer type;
   2022  * hdrlen is the fixed size of the link header for the specified dlt
   2023  * (variable length headers not yet supported).
   2024  */
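/*
 * A typical caller is an Ethernet driver's attach path, roughly (a sketch,
 * not a quote of any particular driver):
 *
 *	bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header));
 *
 * which reaches this function through the bpf_ops vector set up below.
 */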
   2025 static void
   2026 _bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
   2027 {
   2028 	struct bpf_if *bp;
   2029 	bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP);
   2030 	if (bp == NULL)
   2031 		panic("bpfattach");
   2032 
   2033 	mutex_enter(&bpf_mtx);
   2034 	bp->bif_driverp = driverp;
   2035 	bp->bif_ifp = ifp;
   2036 	bp->bif_dlt = dlt;
   2037 	bp->bif_si = NULL;
   2038 	BPF_IFLIST_ENTRY_INIT(bp);
   2039 	PSLIST_INIT(&bp->bif_dlist_head);
   2040 	psref_target_init(&bp->bif_psref, bpf_psref_class);
   2041 
   2042 	BPF_IFLIST_WRITER_INSERT_HEAD(bp);
   2043 
   2044 	*bp->bif_driverp = NULL;
   2045 
   2046 	bp->bif_hdrlen = hdrlen;
   2047 	mutex_exit(&bpf_mtx);
   2048 #if 0
   2049 	printf("bpf: %s attached\n", ifp->if_xname);
   2050 #endif
   2051 }
   2052 
   2053 static void
   2054 _bpf_mtap_softint_init(struct ifnet *ifp)
   2055 {
   2056 	struct bpf_if *bp;
   2057 
   2058 	mutex_enter(&bpf_mtx);
   2059 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2060 		if (bp->bif_ifp != ifp)
   2061 			continue;
   2062 
   2063 		bp->bif_mbuf_head = NULL;
   2064 		bp->bif_mbuf_tail = NULL;
   2065 		bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp);
   2066 		if (bp->bif_si == NULL)
   2067 			panic("%s: softint_establish() failed", __func__);
   2068 		break;
   2069 	}
   2070 	mutex_exit(&bpf_mtx);
   2071 
   2072 	if (bp == NULL)
   2073 		panic("%s: no bpf_if found for %s", __func__, ifp->if_xname);
   2074 }
   2075 
   2076 /*
   2077  * Remove an interface from bpf.
   2078  */
   2079 static void
   2080 _bpfdetach(struct ifnet *ifp)
   2081 {
   2082 	struct bpf_if *bp;
   2083 	struct bpf_d *d;
   2084 	int s;
   2085 
   2086 	mutex_enter(&bpf_mtx);
   2087 	/* Nuke the vnodes for any open instances */
   2088   again_d:
   2089 	BPF_DLIST_WRITER_FOREACH(d) {
   2090 		mutex_enter(d->bd_mtx);
   2091 		if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
   2092 			/*
   2093 			 * Detach the descriptor from an interface now.
    2094 			 * It will be freed later by the close routine.
   2095 			 */
   2096 			d->bd_promisc = 0;	/* we can't touch device. */
   2097 			bpf_detachd(d);
   2098 			mutex_exit(d->bd_mtx);
   2099 			goto again_d;
   2100 		}
   2101 		mutex_exit(d->bd_mtx);
   2102 	}
   2103 
   2104   again:
   2105 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2106 		if (bp->bif_ifp == ifp) {
   2107 			BPF_IFLIST_WRITER_REMOVE(bp);
   2108 
   2109 			pserialize_perform(bpf_psz);
   2110 			psref_target_destroy(&bp->bif_psref, bpf_psref_class);
   2111 
   2112 			BPF_IFLIST_ENTRY_DESTROY(bp);
   2113 			if (bp->bif_si != NULL) {
   2114 				/* XXX NOMPSAFE: assumed running on one CPU */
   2115 				s = splnet();
   2116 				while (bp->bif_mbuf_head != NULL) {
   2117 					struct mbuf *m = bp->bif_mbuf_head;
   2118 					bp->bif_mbuf_head = m->m_nextpkt;
   2119 					m_freem(m);
   2120 				}
   2121 				splx(s);
   2122 				softint_disestablish(bp->bif_si);
   2123 			}
   2124 			kmem_free(bp, sizeof(*bp));
   2125 			goto again;
   2126 		}
   2127 	}
   2128 	mutex_exit(&bpf_mtx);
   2129 }
   2130 
   2131 /*
    2132  * Change the data link type of an interface.
   2133  */
   2134 static void
   2135 _bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
   2136 {
   2137 	struct bpf_if *bp;
   2138 
   2139 	mutex_enter(&bpf_mtx);
   2140 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2141 		if (bp->bif_driverp == &ifp->if_bpf)
   2142 			break;
   2143 	}
   2144 	if (bp == NULL)
   2145 		panic("bpf_change_type");
   2146 
   2147 	bp->bif_dlt = dlt;
   2148 
   2149 	bp->bif_hdrlen = hdrlen;
   2150 	mutex_exit(&bpf_mtx);
   2151 }
   2152 
   2153 /*
    2154  * Get a list of the available data link types of the interface.
   2155  */
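/*
 * Used for the BIOCGDLTLIST ioctl, which is commonly issued twice: with
 * bfl_list == NULL only the number of available DLTs is counted and
 * returned in bfl_len; with a buffer supplied, up to bfl_len entries are
 * copied out and ENOMEM is returned if the buffer is too small.
 */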
   2156 static int
   2157 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
   2158 {
   2159 	int n, error;
   2160 	struct ifnet *ifp;
   2161 	struct bpf_if *bp;
   2162 	int s, bound;
   2163 
   2164 	KASSERT(mutex_owned(d->bd_mtx));
   2165 
   2166 	ifp = d->bd_bif->bif_ifp;
   2167 	n = 0;
   2168 	error = 0;
   2169 
   2170 	bound = curlwp_bind();
   2171 	s = pserialize_read_enter();
   2172 	BPF_IFLIST_READER_FOREACH(bp) {
   2173 		if (bp->bif_ifp != ifp)
   2174 			continue;
   2175 		if (bfl->bfl_list != NULL) {
   2176 			struct psref psref;
   2177 
   2178 			if (n >= bfl->bfl_len) {
   2179 				pserialize_read_exit(s);
    2180 				/* undo curlwp_bind() before bailing out */
         				curlwp_bindx(bound);
         				return ENOMEM;
   2181 			}
   2182 
   2183 			bpf_if_acquire(bp, &psref);
   2184 			pserialize_read_exit(s);
   2185 
   2186 			error = copyout(&bp->bif_dlt,
   2187 			    bfl->bfl_list + n, sizeof(u_int));
   2188 
   2189 			s = pserialize_read_enter();
   2190 			bpf_if_release(bp, &psref);
   2191 		}
   2192 		n++;
   2193 	}
   2194 	pserialize_read_exit(s);
   2195 	curlwp_bindx(bound);
   2196 
   2197 	bfl->bfl_len = n;
   2198 	return error;
   2199 }
   2200 
   2201 /*
   2202  * Set the data link type of a BPF instance.
   2203  */
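/*
 * The descriptor is detached from its current bpf_if and re-attached to
 * the one registered for the requested DLT on the same interface; the
 * capture buffers are reset and promiscuous mode is re-enabled if it was
 * active before the switch.
 */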
   2204 static int
   2205 bpf_setdlt(struct bpf_d *d, u_int dlt)
   2206 {
   2207 	int error, opromisc;
   2208 	struct ifnet *ifp;
   2209 	struct bpf_if *bp;
   2210 
   2211 	KASSERT(mutex_owned(&bpf_mtx));
   2212 	KASSERT(mutex_owned(d->bd_mtx));
   2213 
   2214 	if (d->bd_bif->bif_dlt == dlt)
   2215 		return 0;
   2216 	ifp = d->bd_bif->bif_ifp;
   2217 	BPF_IFLIST_WRITER_FOREACH(bp) {
   2218 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
   2219 			break;
   2220 	}
   2221 	if (bp == NULL)
   2222 		return EINVAL;
   2223 	opromisc = d->bd_promisc;
   2224 	bpf_detachd(d);
   2225 	BPFIF_DLIST_ENTRY_INIT(d);
   2226 	bpf_attachd(d, bp);
   2227 	reset_d(d);
   2228 	if (opromisc) {
   2229 		KERNEL_LOCK_UNLESS_NET_MPSAFE();
   2230 		error = ifpromisc(bp->bif_ifp, 1);
   2231 		KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
   2232 		if (error)
   2233 			printf("%s: bpf_setdlt: ifpromisc failed (%d)\n",
   2234 			    bp->bif_ifp->if_xname, error);
   2235 		else
   2236 			d->bd_promisc = 1;
   2237 	}
   2238 	return 0;
   2239 }
   2240 
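/*
 * Handler for the net.bpf.maxbufsize sysctl created in
 * sysctl_net_bpf_setup() below; new values are accepted only within
 * [BPF_MINBUFSIZE, BPF_MAXBUFSIZE].
 */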
   2241 static int
   2242 sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS)
   2243 {
   2244 	int newsize, error;
   2245 	struct sysctlnode node;
   2246 
   2247 	node = *rnode;
   2248 	node.sysctl_data = &newsize;
   2249 	newsize = bpf_maxbufsize;
   2250 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2251 	if (error || newp == NULL)
   2252 		return (error);
   2253 
   2254 	if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE)
   2255 		return (EINVAL);
   2256 
   2257 	bpf_maxbufsize = newsize;
   2258 
   2259 	return (0);
   2260 }
   2261 
   2262 #if defined(MODULAR) || defined(BPFJIT)
   2263 static int
   2264 sysctl_net_bpf_jit(SYSCTLFN_ARGS)
   2265 {
   2266 	bool newval;
   2267 	int error;
   2268 	struct sysctlnode node;
   2269 
   2270 	node = *rnode;
   2271 	node.sysctl_data = &newval;
   2272 	newval = bpf_jit;
   2273 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2274 	if (error != 0 || newp == NULL)
   2275 		return error;
   2276 
   2277 	bpf_jit = newval;
   2278 
   2279 	/*
    2280 	 * Do a full sync to publish the new bpf_jit value and to
    2281 	 * update the bpfjit_module_ops.bj_generate_code variable.
   2282 	 */
   2283 	membar_sync();
   2284 
   2285 	if (newval && bpfjit_module_ops.bj_generate_code == NULL) {
   2286 		printf("JIT compilation is postponed "
   2287 		    "until after bpfjit module is loaded\n");
   2288 	}
   2289 
   2290 	return 0;
   2291 }
   2292 #endif
   2293 
   2294 static int
   2295 sysctl_net_bpf_peers(SYSCTLFN_ARGS)
   2296 {
   2297 	int    error, elem_count;
   2298 	struct bpf_d	 *dp;
   2299 	struct bpf_d_ext  dpe;
   2300 	size_t len, needed, elem_size, out_size;
   2301 	char   *sp;
   2302 
   2303 	if (namelen == 1 && name[0] == CTL_QUERY)
   2304 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
   2305 
   2306 	if (namelen != 2)
   2307 		return (EINVAL);
   2308 
    2309 	/* The list of BPF peers is privileged information. */
   2310 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE,
   2311 	    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL);
   2312 	if (error)
   2313 		return (EPERM);
   2314 
   2315 	len = (oldp != NULL) ? *oldlenp : 0;
   2316 	sp = oldp;
   2317 	elem_size = name[0];
   2318 	elem_count = name[1];
   2319 	out_size = MIN(sizeof(dpe), elem_size);
   2320 	needed = 0;
   2321 
   2322 	if (elem_size < 1 || elem_count < 0)
   2323 		return (EINVAL);
   2324 
   2325 	mutex_enter(&bpf_mtx);
   2326 	BPF_DLIST_WRITER_FOREACH(dp) {
   2327 		if (len >= elem_size && elem_count > 0) {
   2328 #define BPF_EXT(field)	dpe.bde_ ## field = dp->bd_ ## field
   2329 			BPF_EXT(bufsize);
   2330 			BPF_EXT(promisc);
   2331 			BPF_EXT(state);
   2332 			BPF_EXT(immediate);
   2333 			BPF_EXT(hdrcmplt);
   2334 			BPF_EXT(seesent);
   2335 			BPF_EXT(pid);
   2336 			BPF_EXT(rcount);
   2337 			BPF_EXT(dcount);
   2338 			BPF_EXT(ccount);
   2339 #undef BPF_EXT
   2340 			mutex_enter(dp->bd_mtx);
   2341 			if (dp->bd_bif)
   2342 				(void)strlcpy(dpe.bde_ifname,
   2343 				    dp->bd_bif->bif_ifp->if_xname,
   2344 				    IFNAMSIZ - 1);
   2345 			else
   2346 				dpe.bde_ifname[0] = '\0';
   2347 			mutex_exit(dp->bd_mtx);
   2348 
   2349 			error = copyout(&dpe, sp, out_size);
   2350 			if (error)
   2351 				break;
   2352 			sp += elem_size;
   2353 			len -= elem_size;
   2354 		}
   2355 		needed += elem_size;
   2356 		if (elem_count > 0 && elem_count != INT_MAX)
   2357 			elem_count--;
   2358 	}
   2359 	mutex_exit(&bpf_mtx);
   2360 
   2361 	*oldlenp = needed;
   2362 
   2363 	return (error);
   2364 }
   2365 
   2366 static void
   2367 bpf_stats(void *p, void *arg, struct cpu_info *ci __unused)
   2368 {
   2369 	struct bpf_stat *const stats = p;
   2370 	struct bpf_stat *sum = arg;
   2371 
   2372 	sum->bs_recv += stats->bs_recv;
   2373 	sum->bs_drop += stats->bs_drop;
   2374 	sum->bs_capt += stats->bs_capt;
   2375 }
   2376 
   2377 static int
   2378 bpf_sysctl_gstats_handler(SYSCTLFN_ARGS)
   2379 {
   2380 	struct sysctlnode node;
   2381 	int error;
   2382 	struct bpf_stat sum;
   2383 
   2384 	memset(&sum, 0, sizeof(sum));
   2385 	node = *rnode;
   2386 
   2387 	percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum);
   2388 
   2389 	node.sysctl_data = &sum;
   2390 	node.sysctl_size = sizeof(sum);
   2391 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
   2392 	if (error != 0 || newp == NULL)
   2393 		return error;
   2394 
   2395 	return 0;
   2396 }
   2397 
   2398 static struct sysctllog *bpf_sysctllog;
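/*
 * Register the "net.bpf" sysctl subtree: "jit" (when JIT support is built
 * in or modular), "maxbufsize", "stats" and "peers", backed by the
 * handlers above.
 */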
   2399 static void
   2400 sysctl_net_bpf_setup(void)
   2401 {
   2402 	const struct sysctlnode *node;
   2403 
   2404 	node = NULL;
   2405 	sysctl_createv(&bpf_sysctllog, 0, NULL, &node,
   2406 		       CTLFLAG_PERMANENT,
   2407 		       CTLTYPE_NODE, "bpf",
   2408 		       SYSCTL_DESCR("BPF options"),
   2409 		       NULL, 0, NULL, 0,
   2410 		       CTL_NET, CTL_CREATE, CTL_EOL);
   2411 	if (node != NULL) {
   2412 #if defined(MODULAR) || defined(BPFJIT)
   2413 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2414 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2415 			CTLTYPE_BOOL, "jit",
   2416 			SYSCTL_DESCR("Toggle Just-In-Time compilation"),
   2417 			sysctl_net_bpf_jit, 0, &bpf_jit, 0,
   2418 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2419 #endif
   2420 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2421 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   2422 			CTLTYPE_INT, "maxbufsize",
   2423 			SYSCTL_DESCR("Maximum size for data capture buffer"),
   2424 			sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0,
   2425 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2426 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2427 			CTLFLAG_PERMANENT,
   2428 			CTLTYPE_STRUCT, "stats",
   2429 			SYSCTL_DESCR("BPF stats"),
   2430 			bpf_sysctl_gstats_handler, 0, NULL, 0,
   2431 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2432 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
   2433 			CTLFLAG_PERMANENT,
   2434 			CTLTYPE_STRUCT, "peers",
   2435 			SYSCTL_DESCR("BPF peers"),
   2436 			sysctl_net_bpf_peers, 0, NULL, 0,
   2437 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
   2438 	}
   2439 
   2440 }
   2441 
   2442 struct bpf_ops bpf_ops_kernel = {
   2443 	.bpf_attach =		_bpfattach,
   2444 	.bpf_detach =		_bpfdetach,
   2445 	.bpf_change_type =	_bpf_change_type,
   2446 
   2447 	.bpf_tap =		_bpf_tap,
   2448 	.bpf_mtap =		_bpf_mtap,
   2449 	.bpf_mtap2 =		_bpf_mtap2,
   2450 	.bpf_mtap_af =		_bpf_mtap_af,
   2451 	.bpf_mtap_sl_in =	_bpf_mtap_sl_in,
   2452 	.bpf_mtap_sl_out =	_bpf_mtap_sl_out,
   2453 
   2454 	.bpf_mtap_softint =		_bpf_mtap_softint,
   2455 	.bpf_mtap_softint_init =	_bpf_mtap_softint_init,
   2456 };
   2457 
   2458 MODULE(MODULE_CLASS_DRIVER, bpf, "bpf_filter");
   2459 
   2460 static int
   2461 bpf_modcmd(modcmd_t cmd, void *arg)
   2462 {
   2463 #ifdef _MODULE
   2464 	devmajor_t bmajor, cmajor;
   2465 #endif
   2466 	int error = 0;
   2467 
   2468 	switch (cmd) {
   2469 	case MODULE_CMD_INIT:
   2470 		bpf_init();
   2471 #ifdef _MODULE
   2472 		bmajor = cmajor = NODEVMAJOR;
   2473 		error = devsw_attach("bpf", NULL, &bmajor,
   2474 		    &bpf_cdevsw, &cmajor);
   2475 		if (error)
   2476 			break;
   2477 #endif
   2478 
   2479 		bpf_ops_handover_enter(&bpf_ops_kernel);
   2480 		atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
   2481 		bpf_ops_handover_exit();
   2482 		sysctl_net_bpf_setup();
   2483 		break;
   2484 
   2485 	case MODULE_CMD_FINI:
   2486 		/*
   2487 		 * While there is no reference counting for bpf callers,
   2488 		 * unload could at least in theory be done similarly to
   2489 		 * system call disestablishment.  This should even be
   2490 		 * a little simpler:
   2491 		 *
   2492 		 * 1) replace op vector with stubs
   2493 		 * 2) post update to all cpus with xc
   2494 		 * 3) check that nobody is in bpf anymore
   2495 		 *    (it's doubtful we'd want something like l_sysent,
   2496 		 *     but we could do something like *signed* percpu
   2497 		 *     counters.  if the sum is 0, we're good).
   2498 		 * 4) if fail, unroll changes
   2499 		 *
    2500 		 * NOTE: the change won't be atomic to the outside.  Some
    2501 		 * packets may not be captured even if the unload is
    2502 		 * not successful.  I think packet capture not working
   2503 		 * is a perfectly logical consequence of trying to
   2504 		 * disable packet capture.
   2505 		 */
   2506 		error = EOPNOTSUPP;
   2507 		/* insert sysctl teardown */
   2508 		break;
   2509 
   2510 	default:
   2511 		error = ENOTTY;
   2512 		break;
   2513 	}
   2514 
   2515 	return error;
   2516 }
   2517