/*	$NetBSD: bpf.c,v 1.199.2.1 2016/07/17 05:05:10 pgoyette Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 * static char rcsid[] =
 * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.199.2.1 2016/07/17 05:05:10 pgoyette Exp $");

#if defined(_KERNEL_OPT)
#include "opt_bpf.h"
#include "sl.h"
#include "strip.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/module.h>
#include <sys/once.h>
#include <sys/atomic.h>
#include <sys/localcount.h>

#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/slip.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/bpfjit.h>

#include <net/if_arc.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <compat/sys/sockio.h>

#ifndef BPF_BUFSIZE
/*
 * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet
 * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k).
 */
# define BPF_BUFSIZE 32768
#endif

#define PRINET	26	/* interruptible */

/*
 * The default read buffer size, and limit for BIOCSBLEN, is sysctl'able.
 * XXX the default values should be computed dynamically based
 * on available memory size and available mbuf clusters.
 */
int bpf_bufsize = BPF_BUFSIZE;
int bpf_maxbufsize = BPF_DFLTBUFSIZE;	/* XXX set dynamically, see above */
bool bpf_jit = false;

struct bpfjit_ops bpfjit_module_ops = {
	.bj_generate_code = NULL,
	.bj_free_code = NULL
};

/*
 * Global BPF statistics returned by net.bpf.stats sysctl.
 */
struct bpf_stat bpf_gstats;

/*
 * Use a mutex to avoid a race condition between gathering the stats/peers
 * and opening/closing the device.
 */
static kmutex_t bpf_mtx;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet.
 * bpf_dtab holds the descriptors, indexed by minor device #.
 */
struct bpf_if *bpf_iflist;
LIST_HEAD(, bpf_d) bpf_list;

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_deliver(struct bpf_if *,
		    void *(*cpfn)(void *, const void *, size_t),
		    void *, u_int, u_int, const bool);
static void	bpf_freed(struct bpf_d *);
static void	bpf_ifname(struct ifnet *, struct ifreq *);
static void	*bpf_mcpy(void *, const void *, size_t);
static int	bpf_movein(struct uio *, int, uint64_t,
		    struct mbuf **, struct sockaddr *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static inline void	bpf_wakeup(struct bpf_d *);
static int	bpf_hdrlen(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void *(*)(void *, const void *, size_t), struct timespec *);
static void	reset_d(struct bpf_d *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);

static int	bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	bpf_ioctl(struct file *, u_long, void *);
static int	bpf_poll(struct file *, int);
static int	bpf_stat(struct file *, struct stat *);
static int	bpf_close(struct file *);
static int	bpf_kqfilter(struct file *, struct knote *);
static void	bpf_softintr(void *);

static const struct fileops bpf_fileops = {
	.fo_read = bpf_read,
	.fo_write = bpf_write,
	.fo_ioctl = bpf_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = bpf_poll,
	.fo_stat = bpf_stat,
	.fo_close = bpf_close,
	.fo_kqfilter = bpf_kqfilter,
	.fo_restart = fnullop_restart,
};

dev_type_open(bpfopen);

#ifdef _MODULE
struct localcount bpf_localcount;
#endif

const struct cdevsw bpf_cdevsw = {
	.d_open = bpfopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
#ifdef _MODULE
	.d_localcount = &bpf_localcount,
#endif
	.d_flag = D_OTHER
};

bpfjit_func_t
bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size)
{

	membar_consumer();
	if (bpfjit_module_ops.bj_generate_code != NULL) {
		return bpfjit_module_ops.bj_generate_code(bc, code, size);
	}
	return NULL;
}

void
bpf_jit_freecode(bpfjit_func_t jcode)
{
	KASSERT(bpfjit_module_ops.bj_free_code != NULL);
	bpfjit_module_ops.bj_free_code(jcode);
}

static int
bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp,
    struct sockaddr *sockp)
{
	struct mbuf *m;
	int error;
	size_t len;
	size_t hlen;
	size_t align;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		align = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		/* 6(dst)+6(src)+2(type) */
		hlen = sizeof(struct ether_header);
		align = 2;
		break;

	case DLT_ARCNET:
		sockp->sa_family = AF_UNSPEC;
		hlen = ARC_HDRLEN;
		align = 5;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_LINK;
		/* XXX 4(FORMAC)+6(dst)+6(src) */
		hlen = 16;
		align = 0;
		break;

	case DLT_ECONET:
		sockp->sa_family = AF_UNSPEC;
		hlen = 6;
		align = 2;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * If there aren't enough bytes for a link level header or the
	 * packet length exceeds the interface mtu, return an error.
	 */
	if (len - hlen > mtu)
		return (EMSGSIZE);

	/*
	 * XXX Avoid complicated buffer chaining ---
	 * bail if it won't fit in a single mbuf.
	 * (Take into account possible alignment bytes)
	 */
	if (len + align > MCLBYTES)
		return (EIO);

	m = m_gethdr(M_WAIT, MT_DATA);
	m_reset_rcvif(m);
	m->m_pkthdr.len = (int)(len - hlen);
	if (len + align > MHLEN) {
		m_clget(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}

	/* Ensure the data is properly aligned */
	if (align > 0) {
		m->m_data += align;
		m->m_len -= (int)align;
	}

	error = uiomove(mtod(m, void *), len, uio);
	if (error)
		goto bad;
	if (hlen != 0) {
		memcpy(sockp->sa_data, mtod(m, void *), hlen);
		m->m_data += hlen;	/* XXX */
		len -= hlen;
	}
	m->m_len = (int)len;
	*mp = m;
	return (0);

bad:
	m_freem(m);
	return (error);
}
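
/*
 * Worked example of the DLT_EN10MB case above (illustrative numbers,
 * not part of the original code): a 60-byte write starts with the
 * 14-byte Ethernet header, so hlen = 14 and align = 2.  m_data is
 * first advanced by 2 bytes, uiomove() then copies all 60 bytes, the
 * header is copied into sockp->sa_data and stripped by advancing
 * m_data another 14 bytes.  The mbuf ends up holding the 46-byte
 * payload (m_len = m_pkthdr.len = 46) starting 16 bytes into the
 * buffer, i.e. on a 4-byte boundary, while the caller finds the
 * link-level header in the sockaddr.
 */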

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splnet.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	KASSERT(mutex_owned(&bpf_mtx));
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_d **p;
	struct bpf_if *bp;

	KASSERT(mutex_owned(&bpf_mtx));

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error __diagused;

		d->bd_promisc = 0;
		/*
		 * Take device out of promiscuous mode.  Since we were
		 * able to enter promiscuous mode, we should be able
		 * to turn it off.  But we can get an error if the
		 * interface was configured down, so in the diagnostic
		 * case just log the failure instead of panicking.
		 */
		error = ifpromisc(bp->bif_ifp, 0);
#ifdef DIAGNOSTIC
		if (error)
			printf("%s: ifpromisc failed: %d\n", __func__, error);
#endif
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == NULL)
			panic("%s: descriptor not in list", __func__);
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == NULL)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = NULL;
	d->bd_bif = NULL;
}

static int
doinit(void)
{

	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);

	LIST_INIT(&bpf_list);

	bpf_gstats.bs_recv = 0;
	bpf_gstats.bs_drop = 0;
	bpf_gstats.bs_capt = 0;

	return 0;
}

/*
 * bpfilterattach() is called at boot time.
 */
/* ARGSUSED */
void
bpfilterattach(int n)
{
	static ONCE_DECL(control);

	RUN_ONCE(&control, doinit);
}

/*
 * Open the bpf device.  Clones.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct bpf_d *d;
	struct file *fp;
	int error, fd;

	/* falloc() will fill in the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO);
	d->bd_bufsize = bpf_bufsize;
	d->bd_seesent = 1;
	d->bd_feedback = 0;
	d->bd_pid = l->l_proc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
#endif
	getnanotime(&d->bd_btime);
	d->bd_atime = d->bd_mtime = d->bd_btime;
	callout_init(&d->bd_callout, 0);
	selinit(&d->bd_sel);
	d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d);
	d->bd_jitcode = NULL;

	mutex_enter(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_list, d, bd_list);
	mutex_exit(&bpf_mtx);

	return fd_clone(fp, fd, flag, &bpf_fileops, d);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpf_close(struct file *fp)
{
	struct bpf_d *d;
	int s;

	KERNEL_LOCK(1, NULL);
	mutex_enter(&bpf_mtx);

	if ((d = fp->f_bpf) == NULL) {
		mutex_exit(&bpf_mtx);
		KERNEL_UNLOCK_ONE(NULL);
		return 0;
	}

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	d->bd_pid = curproc->p_pid;

	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	LIST_REMOVE(d, bd_list);
	fp->f_bpf = NULL;

	mutex_exit(&bpf_mtx);
	KERNEL_UNLOCK_ONE(NULL);

	callout_destroy(&d->bd_callout);
	seldestroy(&d->bd_sel);
	softint_disestablish(d->bd_sih);
	free(d, M_DEVBUF);

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
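
/*
 * Illustrative walk-through of the rotation (assumed state, for
 * exposition only): before rotation a busy descriptor might have
 *
 *	hbuf = NULL		(nothing pending for the reader)
 *	sbuf = A, slen = 3000	(being filled by catchpacket())
 *	fbuf = B		(spare buffer)
 *
 * and after ROTATE_BUFFERS(d):
 *
 *	hbuf = A, hlen = 3000	(handed to the next read())
 *	sbuf = B, slen = 0	(capture continues here)
 *	fbuf = NULL		(regained when the read completes)
 */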
/*
 * bpf_read - read next chunk of packets from buffers
 */
static int
bpf_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	int timed_out;
	int error;
	int s;

	getnanotime(&d->bd_atime);
	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (fp->f_flag & FNONBLOCK) {
			if (d->bd_slen == 0) {
				splx(s);
				KERNEL_UNLOCK_ONE(NULL);
				return (EWOULDBLOCK);
			}
			ROTATE_BUFFERS(d);
			break;
		}

		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read, or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = tsleep(d, PRINET|PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			KERNEL_UNLOCK_ONE(NULL);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				KERNEL_UNLOCK_ONE(NULL);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (error != 0)
			goto done;
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splnet();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
done:
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);
	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(struct bpf_d *d)
{
	wakeup(d);
	if (d->bd_async)
		softint_schedule(d->bd_sih);
	selnotify(&d->bd_sel, 0, 0);
}

static void
bpf_softintr(void *cookie)
{
	struct bpf_d *d;

	d = cookie;
	if (d->bd_async)
		fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = arg;
	int s;

	s = splnet();
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	splx(s);
}

static int
bpf_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	int error, s;
	static struct sockaddr_storage dst;

	m = NULL;	/* XXX gcc */

	KERNEL_LOCK(1, NULL);

	if (d->bd_bif == NULL) {
		KERNEL_UNLOCK_ONE(NULL);
		return (ENXIO);
	}
	getnanotime(&d->bd_mtime);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return (0);
	}

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m,
	    (struct sockaddr *) &dst);
	if (error) {
		KERNEL_UNLOCK_ONE(NULL);
		return (error);
	}

	if (m->m_pkthdr.len > ifp->if_mtu) {
		KERNEL_UNLOCK_ONE(NULL);
		m_freem(m);
		return (EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.ss_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, 0, M_COPYALL, M_NOWAIT);
		if (mc != NULL)
			m_set_rcvif(mc, ifp);
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (1 /*d->bd_direction == BPF_D_INOUT*/)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	s = splsoftnet();
	error = if_output_lock(ifp, ifp, m, (struct sockaddr *) &dst, NULL);

	if (mc != NULL) {
		if (error == 0)
			ifp->_if_input(ifp, mc);
		else
			m_freem(mc);
	}
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splnet.
 */
static void
reset_d(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_ccount = 0;
}

/*
 * FIONREAD		Check for read packet available.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set link layer read filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag.
 * BIOCSHDRCMPLT	Set "header already complete" flag.
 * BIOCSFEEDBACK	Set packet feedback mode.
 * BIOCGFEEDBACK	Get packet feedback mode.
 * BIOCGSEESENT		Get "see sent packets" mode.
 * BIOCSSEESENT		Set "see sent packets" mode.
 */
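
/*
 * Minimal userland sketch of the common ioctl sequence (illustrative
 * only; "wm0" is a hypothetical interface name and error handling is
 * omitted):
 *
 *	int fd = open("/dev/bpf", O_RDONLY);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "wm0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	// attach to the interface
 *	u_int yes = 1;
 *	ioctl(fd, BIOCIMMEDIATE, &yes);	// deliver packets as they arrive
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);	// read() must use exactly this size
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);	// bpf_hdr-framed packets
 */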
/* ARGSUSED */
static int
bpf_ioctl(struct file *fp, u_long cmd, void *addr)
{
	struct bpf_d *d = fp->f_bpf;
	int s, error = 0;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	KERNEL_LOCK(1, NULL);
	d->bd_pid = curproc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
	else
		d->bd_compat32 = 0;
#endif

	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	splx(s);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splnet();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splnet();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splnet();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported device parameters.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, addr);
		break;

	/*
	 * Set device parameters.
	 */
	case BIOCSDLT:
		mutex_enter(&bpf_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		mutex_exit(&bpf_mtx);
		break;

	/*
	 * Get interface name.
	 */
#ifdef OBIOCGETIF
	case OBIOCGETIF:
#endif
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, addr);
		break;

	/*
	 * Set interface.
	 */
#ifdef OBIOCSETIF
	case OBIOCSETIF:
#endif
	case BIOCSETIF:
		mutex_enter(&bpf_mtx);
		error = bpf_setif(d, addr);
		mutex_exit(&bpf_mtx);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}
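
	/*
	 * Worked example of the tick conversion above (assuming
	 * hz = 100, so tick = 10000 microseconds): a timeout of
	 * { tv_sec = 2, tv_usec = 500000 } gives
	 * bd_rtout = 2 * 100 + 500000 / 10000 = 250 ticks.  A nonzero
	 * timeout that truncates to 0 ticks (e.g. tv_usec = 5000) is
	 * bumped to 1 tick, since bd_rtout == 0 means "sleep forever"
	 * to tsleep().
	 */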

#ifdef BIOCGORTIMEOUT
	/*
	 * Get read timeout.
	 */
	case BIOCGORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}
#endif

#ifdef BIOCSORTIMEOUT
	/*
	 * Set read timeout.
	 */
	case BIOCSORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}
#endif

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			bs->bs_capt = d->bd_ccount;
			break;
		}

	case BIOCGSTATSOLD:
		{
			struct bpf_stat_old *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	/*
	 * Set "feed packets from bpf back to input" mode
	 */
	case BIOCSFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	/*
	 * Get "feed packets from bpf back to input" mode
	 */
	case BIOCGFEEDBACK:
		*(u_int *)addr = d->bd_feedback;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		/*
		 * No need to do anything special as bpf_read() checks
		 * FNONBLOCK on the file to decide whether or not to
		 * block the read.
		 */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case TIOCSPGRP:		/* Process or group to send signals to */
	case FIOSETOWN:
		error = fsetown(&d->bd_pgid, cmd, addr);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(d->bd_pgid, cmd, addr);
		break;
	}
	KERNEL_UNLOCK_ONE(NULL);
	return (error);
}
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(struct bpf_d *d, struct bpf_program *fp)
{
	struct bpf_insn *fcode, *old;
	bpfjit_func_t jcode, oldj;
	size_t flen, size;
	int s;

	jcode = NULL;
	flen = fp->bf_len;

	if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) {
		return EINVAL;
	}

	if (flen) {
		/*
		 * Allocate the buffer, copy the byte-code from
		 * userspace and validate it.
		 */
		size = flen * sizeof(*fp->bf_insns);
		fcode = malloc(size, M_DEVBUF, M_WAITOK);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    !bpf_validate(fcode, (int)flen)) {
			free(fcode, M_DEVBUF);
			return EINVAL;
		}
		membar_consumer();
		if (bpf_jit)
			jcode = bpf_jit_generate(NULL, fcode, flen);
	} else {
		fcode = NULL;
	}

	s = splnet();
	old = d->bd_filter;
	d->bd_filter = fcode;
	oldj = d->bd_jitcode;
	d->bd_jitcode = jcode;
	reset_d(d);
	splx(s);

	if (old) {
		free(old, M_DEVBUF);
	}
	if (oldj) {
		bpf_jit_freecode(oldj);
	}

	return 0;
}
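
/*
 * Illustrative sketch of what userland hands to bpf_setf() through
 * BIOCSETF (a classic "accept IPv4 over Ethernet" program; the macros
 * are from <net/bpf.h>, fd is a hypothetical open bpf descriptor):
 *
 *	struct bpf_insn insns[] = {
 *		// load the 16-bit Ethernet type field at offset 12
 *		BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 12),
 *		// if it is ETHERTYPE_IP (0x0800), fall through, else skip
 *		BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x0800, 0, 1),
 *		// accept, snapping at most 96 bytes
 *		BPF_STMT(BPF_RET + BPF_K, 96),
 *		// reject
 *		BPF_STMT(BPF_RET + BPF_K, 0),
 *	};
 *	struct bpf_program prog = {
 *		.bf_len = sizeof(insns) / sizeof(insns[0]),
 *		.bf_insns = insns,
 *	};
 *	ioctl(fd, BIOCSETF, &prog);
 */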

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	char *cp;
	int unit_seen, i, s, error;

	KASSERT(mutex_owned(&bpf_mtx));
	/*
	 * Make sure the provided name has a unit number, and default
	 * it to '0' if not specified.
	 * XXX This is ugly ... do this differently?
	 */
	unit_seen = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
	while (*cp++)
		if (*cp >= '0' && *cp <= '9')
			unit_seen = 1;
	if (!unit_seen) {
		/* Make sure to leave room for the '\0'. */
		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
			if ((ifr->ifr_name[i] >= 'a' &&
			     ifr->ifr_name[i] <= 'z') ||
			    (ifr->ifr_name[i] >= 'A' &&
			     ifr->ifr_name[i] <= 'Z'))
				continue;
			ifr->ifr_name[i] = '0';
		}
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL ||
		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splnet();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Copy the interface name to the ifreq.
 */
static void
bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
{
	memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
}

static int
bpf_stat(struct file *fp, struct stat *st)
{
	struct bpf_d *d = fp->f_bpf;

	(void)memset(st, 0, sizeof(*st));
	KERNEL_LOCK(1, NULL);
	st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid);
	st->st_atimespec = d->bd_atime;
	st->st_mtimespec = d->bd_mtime;
	st->st_ctimespec = st->st_birthtimespec = d->bd_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	st->st_mode = S_IFCHR;
	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}

/*
 * Support for poll() system call
 *
 * Return true iff the specific operation will not block indefinitely - with
 * the assumption that it is safe to positively acknowledge a request for the
 * ability to write to the BPF device.
 * Otherwise, return false but make a note that a selnotify() must be done.
 */
static int
bpf_poll(struct file *fp, int events)
{
	struct bpf_d *d = fp->f_bpf;
	int s = splnet();
	int revents;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	KERNEL_LOCK(1, NULL);
	d->bd_pid = curproc->p_pid;

	revents = events & (POLLOUT | POLLWRNORM);
	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 */
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		     d->bd_slen != 0)) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &d->bd_sel);
			/* Start the read timeout if necessary */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}

	KERNEL_UNLOCK_ONE(NULL);
	splx(s);
	return (revents);
}

static void
filt_bpfrdetach(struct knote *kn)
{
	struct bpf_d *d = kn->kn_hook;
	int s;

	KERNEL_LOCK(1, NULL);
	s = splnet();
	SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = kn->kn_hook;
	int rv;

	KERNEL_LOCK(1, NULL);
	kn->kn_data = d->bd_hlen;
	if (d->bd_immediate)
		kn->kn_data += d->bd_slen;
	rv = (kn->kn_data > 0);
	KERNEL_UNLOCK_ONE(NULL);
	return rv;
}

static const struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfrdetach, filt_bpfread };

static int
bpf_kqfilter(struct file *fp, struct knote *kn)
{
	struct bpf_d *d = fp->f_bpf;
	struct klist *klist;
	int s;

	KERNEL_LOCK(1, NULL);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &d->bd_sel.sel_klist;
		kn->kn_fop = &bpfread_filtops;
		break;

	default:
		KERNEL_UNLOCK_ONE(NULL);
		return (EINVAL);
	}

	kn->kn_hook = d;

	s = splnet();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);

	return (0);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void *
bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcpy");
		count = min(m->m_len, len);
		memcpy(dst, mtod(m, const void *), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
	return dst_arg;
}

/*
 * Dispatch a packet to all the listeners on interface bp.
 *
 * pkt		pointer to the packet, either a data buffer or an mbuf chain
 * buflen	buffer length, if pkt is a data buffer
 * cpfn		a function that can copy pkt into the listener's buffer
 * pktlen	length of the packet
 * rcv		true if packet came in
 */
static inline void
bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
    void *pkt, u_int pktlen, u_int buflen, const bool rcv)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = (const uint8_t *)pkt,
		.wirelen = pktlen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bool gottime = false;
	struct timespec ts;

	/*
	 * Note that the IPL does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data, which is not the case.
	 */
	for (struct bpf_d *d = bp->bif_dlist; d != NULL; d = d->bd_next) {
		u_int slen;

		if (!d->bd_seesent && !rcv) {
			continue;
		}
		d->bd_rcount++;
		bpf_gstats.bs_recv++;

		if (d->bd_jitcode)
			slen = d->bd_jitcode(NULL, &args);
		else
			slen = bpf_filter_ext(NULL, d->bd_filter, &args);

		if (!slen) {
			continue;
		}
		if (!gottime) {
			gottime = true;
			nanotime(&ts);
		}
		catchpacket(d, pkt, pktlen, slen, cpfn, &ts);
	}
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
static void
_bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{

	bpf_deliver(bp, memcpy, pkt, pktlen, pktlen, true);
}

/*
 * Incoming linkage from device drivers, when the head of the packet is in
 * a buffer, and the tail is in an mbuf chain.
 */
static void
_bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	u_int pktlen;
	struct mbuf mb;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m) + dlen;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	(void)memset(&mb, 0, sizeof(mb));
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;

	bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif_index != 0);
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
static void
_bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	void *(*cpfn)(void *, const void *, size_t);
	u_int pktlen, buflen;
	void *marg;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m);

	if (pktlen == m->m_len) {
		cpfn = (void *)memcpy;
		marg = mtod(m, void *);
		buflen = pktlen;
	} else {
		cpfn = bpf_mcpy;
		marg = m;
		buflen = 0;
	}

	bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif_index != 0);
}

/*
 * We need to prepend the address family as
 * a four byte field.  Cons up a dummy header
 * to pacify bpf.  This is safe because bpf
 * will only read from the mbuf (i.e., it won't
 * try to free it or keep a pointer to it).
 */
static void
_bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m)
{
	struct mbuf m0;

	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_len = 4;
	m0.m_data = (char *)&af;

	_bpf_mtap(bp, &m0);
}
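
/*
 * The resulting capture therefore starts with the four-byte family
 * value, which is what DLT_NULL consumers expect; e.g. for an IPv4
 * packet the filter sees (af is in host byte order, as passed in):
 *
 *	offset 0:	uint32_t af (e.g. AF_INET)
 *	offset 4:	the original mbuf chain (IP header onward)
 */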

/*
 * Put the SLIP pseudo-"link header" in place.
 * Note this M_PREPEND() should never fail,
 * since we know we always have enough space
 * in the input buffer.
 */
static void
_bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m)
{
	int s;
	u_char *hp;

	M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
	if (*m == NULL)
		return;

	hp = mtod(*m, u_char *);
	hp[SLX_DIR] = SLIPDIR_IN;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	s = splnet();
	_bpf_mtap(bp, *m);
	splx(s);

	m_adj(*m, SLIP_HDRLEN);
}

/*
 * Put the SLIP pseudo-"link header" in
 * place.  The compressed header is now
 * at the beginning of the mbuf.
 */
static void
_bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m)
{
	struct mbuf m0;
	u_char *hp;
	int s;

	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_data = m0.m_dat;
	m0.m_len = SLIP_HDRLEN;

	hp = mtod(&m0, u_char *);

	hp[SLX_DIR] = SLIPDIR_OUT;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	s = splnet();
	_bpf_mtap(bp, &m0);
	splx(s);
	m_freem(m);
}

static int
bpf_hdrlen(struct bpf_d *d)
{
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
#ifdef _LP64
	if (d->bd_compat32)
		return (BPF_WORDALIGN32(hdrlen + SIZEOF_BPF_HDR32) - hdrlen);
	else
#endif
		return (BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen);
}
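
/*
 * Worked example of the alignment math above (hypothetical sizes,
 * assuming BPF_WORDALIGN rounds up to a multiple of 8 and
 * SIZEOF_BPF_HDR is 32): for an Ethernet attachment, bif_hdrlen = 14,
 * BPF_WORDALIGN(14 + 32) = 48, and bpf_hdrlen() returns 48 - 14 = 34.
 * Laying down 34 bytes of bpf header followed by the 14-byte link
 * header puts the network layer header at offset 48, a longword
 * boundary, which is exactly the spacing the comment describes.
 */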

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Call the wakeup functions if it's time to wake up
 * a listener (buffer full), "cpfn" is the routine called to do the
 * actual data transfer.  memcpy is passed in to copy contiguous chunks,
 * while bpf_mcpy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void *(*cpfn)(void *, const void *, size_t), struct timespec *ts)
{
	char *h;
	int totlen, curlen, caplen;
	int hdrlen = bpf_hdrlen(d);
	int do_wakeup = 0;

	++d->bd_ccount;
	++bpf_gstats.bs_capt;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;
	/*
	 * If we adjusted totlen to fit the bufsize, it could be that
	 * totlen is smaller than hdrlen because of the link layer header.
	 */
	caplen = totlen - hdrlen;
	if (caplen < 0)
		caplen = 0;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
#ifdef _LP64
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			++bpf_gstats.bs_drop;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	h = (char *)d->bd_sbuf + curlen;
#ifdef _LP64
	if (d->bd_compat32) {
		struct bpf_hdr32 *hp32;

		hp32 = (struct bpf_hdr32 *)h;
		hp32->bh_tstamp.tv_sec = ts->tv_sec;
		hp32->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
		hp32->bh_datalen = pktlen;
		hp32->bh_hdrlen = hdrlen;
		hp32->bh_caplen = caplen;
	} else
#endif
	{
		struct bpf_hdr *hp;

		hp = (struct bpf_hdr *)h;
		hp->bh_tstamp.tv_sec = ts->tv_sec;
		hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
		hp->bh_datalen = pktlen;
		hp->bh_hdrlen = hdrlen;
		hp->bh_caplen = caplen;
	}

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(h + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;

	/*
	 * Call bpf_wakeup after bd_slen has been updated so that kevent(2)
	 * will cause filt_bpfread() to be called with it adjusted.
	 */
	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Allocate the packet buffers for a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{

	d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT);
	if (!d->bd_fbuf)
		return (ENOBUFS);
	d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT);
	if (!d->bd_sbuf) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it has not yet been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != NULL)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != NULL)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free(d->bd_filter, M_DEVBUF);

	if (d->bd_jitcode != NULL) {
		bpf_jit_freecode(d->bd_jitcode);
	}
}

/*
 * Attach an interface to bpf.  dlt is the link layer type;
 * hdrlen is the fixed size of the link header for the specified dlt
 * (variable length headers not yet supported).
 */
static void
_bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == NULL)
		panic("bpfattach");

	mutex_enter(&bpf_mtx);
	bp->bif_dlist = NULL;
	bp->bif_driverp = driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = NULL;

	bp->bif_hdrlen = hdrlen;
	mutex_exit(&bpf_mtx);
#if 0
	printf("bpf: %s attached\n", ifp->if_xname);
#endif
}

/*
 * Remove an interface from bpf.
 */
static void
_bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, **pbp;
	struct bpf_d *d;
	int s;

	mutex_enter(&bpf_mtx);
	/* Nuke the vnodes for any open instances */
	LIST_FOREACH(d, &bpf_list, bd_list) {
		if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
			/*
			 * Detach the descriptor from the interface now.
			 * It will be freed later by the close routine.
			 */
			s = splnet();
			d->bd_promisc = 0;	/* we can't touch device. */
			bpf_detachd(d);
			splx(s);
		}
	}

again:
	for (bp = bpf_iflist, pbp = &bpf_iflist;
	     bp != NULL; pbp = &bp->bif_next, bp = bp->bif_next) {
		if (bp->bif_ifp == ifp) {
			*pbp = bp->bif_next;
			free(bp, M_DEVBUF);
			goto again;
		}
	}
	mutex_exit(&bpf_mtx);
}

/*
 * Change the data link type of an interface.
 */
static void
_bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	struct bpf_if *bp;

	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_driverp == &ifp->if_bpf)
			break;
	}
	if (bp == NULL)
		panic("bpf_change_type");

	bp->bif_dlt = dlt;

	bp->bif_hdrlen = hdrlen;
}

/*
 * Get a list of the available data link types of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len)
				return ENOMEM;
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	bfl->bfl_len = n;
	return error;
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int s, error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	KASSERT(mutex_owned(&bpf_mtx));

	if (d->bd_bif->bif_dlt == dlt)
		return 0;
	ifp = d->bd_bif->bif_ifp;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp == NULL)
		return EINVAL;
	s = splnet();
	opromisc = d->bd_promisc;
	bpf_detachd(d);
	bpf_attachd(d, bp);
	reset_d(d);
	if (opromisc) {
		error = ifpromisc(bp->bif_ifp, 1);
		if (error)
			printf("%s: bpf_setdlt: ifpromisc failed (%d)\n",
			    bp->bif_ifp->if_xname, error);
		else
			d->bd_promisc = 1;
	}
	splx(s);
	return 0;
}

static int
sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &newsize;
	newsize = bpf_maxbufsize;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE)
		return (EINVAL);

	bpf_maxbufsize = newsize;

	return (0);
}

#if defined(MODULAR) || defined(BPFJIT)
static int
sysctl_net_bpf_jit(SYSCTLFN_ARGS)
{
	bool newval;
	int error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &newval;
	newval = bpf_jit;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	bpf_jit = newval;

	/*
	 * Do a full sync to publish new bpf_jit value and
	 * update bpfjit_module_ops.bj_generate_code variable.
	 */
	membar_sync();

	if (newval && bpfjit_module_ops.bj_generate_code == NULL) {
		printf("JIT compilation is postponed "
		    "until after bpfjit module is loaded\n");
	}

	return 0;
}
#endif

static int
sysctl_net_bpf_peers(SYSCTLFN_ARGS)
{
	int error, elem_count;
	struct bpf_d *dp;
	struct bpf_d_ext dpe;
	size_t len, needed, elem_size, out_size;
	char *sp;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 2)
		return (EINVAL);

	/* BPF peers is privileged information. */
	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE,
	    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL);
	if (error)
		return (EPERM);

	len = (oldp != NULL) ? *oldlenp : 0;
	sp = oldp;
	elem_size = name[0];
	elem_count = name[1];
	out_size = MIN(sizeof(dpe), elem_size);
	needed = 0;

	if (elem_size < 1 || elem_count < 0)
		return (EINVAL);

	mutex_enter(&bpf_mtx);
	LIST_FOREACH(dp, &bpf_list, bd_list) {
		if (len >= elem_size && elem_count > 0) {
#define BPF_EXT(field)	dpe.bde_ ## field = dp->bd_ ## field
			BPF_EXT(bufsize);
			BPF_EXT(promisc);
			BPF_EXT(state);
			BPF_EXT(immediate);
			BPF_EXT(hdrcmplt);
			BPF_EXT(seesent);
			BPF_EXT(pid);
			BPF_EXT(rcount);
			BPF_EXT(dcount);
			BPF_EXT(ccount);
#undef BPF_EXT
			if (dp->bd_bif)
				(void)strlcpy(dpe.bde_ifname,
				    dp->bd_bif->bif_ifp->if_xname,
				    IFNAMSIZ - 1);
			else
				dpe.bde_ifname[0] = '\0';

			error = copyout(&dpe, sp, out_size);
			if (error)
				break;
			sp += elem_size;
			len -= elem_size;
		}
		needed += elem_size;
		if (elem_count > 0 && elem_count != INT_MAX)
			elem_count--;
	}
	mutex_exit(&bpf_mtx);

	*oldlenp = needed;

	return (error);
}

static struct sysctllog *bpf_sysctllog;
static void
sysctl_net_bpf_setup(void)
{
	const struct sysctlnode *node;

	node = NULL;
	sysctl_createv(&bpf_sysctllog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "bpf",
	    SYSCTL_DESCR("BPF options"),
	    NULL, 0, NULL, 0,
	    CTL_NET, CTL_CREATE, CTL_EOL);
	if (node != NULL) {
#if defined(MODULAR) || defined(BPFJIT)
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "jit",
		    SYSCTL_DESCR("Toggle Just-In-Time compilation"),
		    sysctl_net_bpf_jit, 0, &bpf_jit, 0,
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
#endif
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_INT, "maxbufsize",
		    SYSCTL_DESCR("Maximum size for data capture buffer"),
		    sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0,
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT,
		    CTLTYPE_STRUCT, "stats",
		    SYSCTL_DESCR("BPF stats"),
		    NULL, 0, &bpf_gstats, sizeof(bpf_gstats),
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT,
		    CTLTYPE_STRUCT, "peers",
		    SYSCTL_DESCR("BPF peers"),
		    sysctl_net_bpf_peers, 0, NULL, 0,
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}
}
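
/*
 * The nodes created above appear under "net.bpf"; for example, from
 * userland with sysctl(8) (the numeric node ids are assigned
 * dynamically via CTL_CREATE):
 *
 *	sysctl net.bpf.maxbufsize	# current BIOCSBLEN cap
 *	sysctl -w net.bpf.jit=1		# enable JIT, when compiled in
 *	sysctl net.bpf.stats		# struct bpf_stat counters
 */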

struct bpf_ops bpf_ops_kernel = {
	.bpf_attach = _bpfattach,
	.bpf_detach = _bpfdetach,
	.bpf_change_type = _bpf_change_type,

	.bpf_tap = _bpf_tap,
	.bpf_mtap = _bpf_mtap,
	.bpf_mtap2 = _bpf_mtap2,
	.bpf_mtap_af = _bpf_mtap_af,
	.bpf_mtap_sl_in = _bpf_mtap_sl_in,
	.bpf_mtap_sl_out = _bpf_mtap_sl_out,
};

MODULE(MODULE_CLASS_DRIVER, bpf, "bpf_filter");

static int
bpf_modcmd(modcmd_t cmd, void *arg)
{
#ifdef _MODULE
	devmajor_t bmajor, cmajor;
#endif
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
		bpfilterattach(0);
#ifdef _MODULE
		bmajor = cmajor = NODEVMAJOR;
		error = devsw_attach("bpf", NULL, &bmajor,
		    &bpf_cdevsw, &cmajor);
#endif
		if (error)
			break;

		bpf_ops_handover_enter(&bpf_ops_kernel);
		atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
		bpf_ops_handover_exit();
		sysctl_net_bpf_setup();
		break;

	case MODULE_CMD_FINI:
		/*
		 * While there is no reference counting for bpf callers,
		 * unload could at least in theory be done similarly to
		 * system call disestablishment.  This should even be
		 * a little simpler:
		 *
		 * 1) replace op vector with stubs
		 * 2) post update to all cpus with xc
		 * 3) check that nobody is in bpf anymore
		 *    (it's doubtful we'd want something like l_sysent,
		 *    but we could do something like *signed* percpu
		 *    counters.  if the sum is 0, we're good).
		 * 4) if fail, unroll changes
		 *
		 * NOTE: change won't be atomic to the outside.  some
		 * packets may be not captured even if unload is
		 * not successful.  I think packet capture not working
		 * is a perfectly logical consequence of trying to
		 * disable packet capture.
		 */
		error = EOPNOTSUPP;
		/* insert sysctl teardown */
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}