/*	$NetBSD: bpf.c,v 1.190 2014/12/29 13:38:13 ozaki-r Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 * static char rcsid[] =
 * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.190 2014/12/29 13:38:13 ozaki-r Exp $");

#if defined(_KERNEL_OPT)
#include "opt_bpf.h"
#include "sl.h"
#include "strip.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/module.h>
#include <sys/once.h>
#include <sys/atomic.h>

#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/slip.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/bpfjit.h>

#include <net/if_arc.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>


#include <compat/sys/sockio.h>

#ifndef BPF_BUFSIZE
/*
 * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet
 * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k).
 */
# define BPF_BUFSIZE 32768
#endif

#define	PRINET	26		/* interruptible */

/*
 * The default read buffer size, and the limit for BIOCSBLEN, are sysctl'able.
 * XXX the default values should be computed dynamically based
 * on available memory size and available mbuf clusters.
 */
int bpf_bufsize = BPF_BUFSIZE;
int bpf_maxbufsize = BPF_DFLTBUFSIZE;	/* XXX set dynamically, see above */
bool bpf_jit = false;

struct bpfjit_ops bpfjit_module_ops = {
	.bj_generate_code = NULL,
	.bj_free_code = NULL
};

/*
 * Global BPF statistics returned by net.bpf.stats sysctl.
 */
struct bpf_stat	bpf_gstats;

/*
 * Use a mutex to avoid a race condition between gathering the stats/peers
 * and opening/closing the device.
 */
static kmutex_t bpf_mtx;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
struct bpf_if	*bpf_iflist;
LIST_HEAD(, bpf_d) bpf_list;

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_deliver(struct bpf_if *,
		    void *(*cpfn)(void *, const void *, size_t),
		    void *, u_int, u_int, const bool);
static void	bpf_freed(struct bpf_d *);
static void	bpf_ifname(struct ifnet *, struct ifreq *);
static void	*bpf_mcpy(void *, const void *, size_t);
static int	bpf_movein(struct uio *, int, uint64_t,
		    struct mbuf **, struct sockaddr *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static inline void	bpf_wakeup(struct bpf_d *);
static int	bpf_hdrlen(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void *(*)(void *, const void *, size_t), struct timespec *);
static void	reset_d(struct bpf_d *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);

static int	bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	bpf_ioctl(struct file *, u_long, void *);
static int	bpf_poll(struct file *, int);
static int	bpf_stat(struct file *, struct stat *);
static int	bpf_close(struct file *);
static int	bpf_kqfilter(struct file *, struct knote *);
static void	bpf_softintr(void *);

static const struct fileops bpf_fileops = {
	.fo_read = bpf_read,
	.fo_write = bpf_write,
	.fo_ioctl = bpf_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = bpf_poll,
	.fo_stat = bpf_stat,
	.fo_close = bpf_close,
	.fo_kqfilter = bpf_kqfilter,
	.fo_restart = fnullop_restart,
};

dev_type_open(bpfopen);

const struct cdevsw bpf_cdevsw = {
	.d_open = bpfopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

bpfjit_func_t
bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size)
{

	membar_consumer();
	if (bpfjit_module_ops.bj_generate_code != NULL) {
		return bpfjit_module_ops.bj_generate_code(bc, code, size);
	}
	return NULL;
}

void
bpf_jit_freecode(bpfjit_func_t jcode)
{
	KASSERT(bpfjit_module_ops.bj_free_code != NULL);
	bpfjit_module_ops.bj_free_code(jcode);
}

static int
bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp,
    struct sockaddr *sockp)
{
	struct mbuf *m;
	int error;
	size_t len;
	size_t hlen;
	size_t align;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		align = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		/* 6(dst)+6(src)+2(type) */
		hlen = sizeof(struct ether_header);
		align = 2;
		break;

	case DLT_ARCNET:
		sockp->sa_family = AF_UNSPEC;
		hlen = ARC_HDRLEN;
		align = 5;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_LINK;
		/* XXX 4(FORMAC)+6(dst)+6(src) */
		hlen = 16;
		align = 0;
		break;

	case DLT_ECONET:
		sockp->sa_family = AF_UNSPEC;
		hlen = 6;
		align = 2;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * If there aren't enough bytes for a link level header, or the
	 * packet length exceeds the interface MTU, return an error.
	 * (len and hlen are unsigned, so a packet shorter than the link
	 * header makes len - hlen wrap around and fail this check too.)
	 */
	if (len - hlen > mtu)
		return (EMSGSIZE);

	/*
	 * XXX Avoid complicated buffer chaining ---
	 * bail if it won't fit in a single mbuf.
	 * (Take into account possible alignment bytes)
	 */
	if (len + align > MCLBYTES)
		return (EIO);

	m = m_gethdr(M_WAIT, MT_DATA);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = (int)(len - hlen);
	if (len + align > MHLEN) {
		m_clget(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	/* Ensure the data is properly aligned */
	if (align > 0) {
		m->m_data += align;
		m->m_len -= (int)align;
	}

	error = uiomove(mtod(m, void *), len, uio);
	if (error)
		goto bad;
	if (hlen != 0) {
		memcpy(sockp->sa_data, mtod(m, void *), hlen);
		m->m_data += hlen; /* XXX */
		len -= hlen;
	}
	m->m_len = (int)len;
	*mp = m;
	return (0);

bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splnet.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error __diagused;

		d->bd_promisc = 0;
		/*
		 * Take device out of promiscuous mode.  Since we were
		 * able to enter promiscuous mode, we should be able
		 * to turn it off.  But we can get an error if the
		 * interface was configured down, so under DIAGNOSTIC
		 * just log the failure rather than panicking.
		 */
		error = ifpromisc(bp->bif_ifp, 0);
#ifdef DIAGNOSTIC
		if (error)
			printf("%s: ifpromisc failed: %d", __func__, error);
#endif
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == NULL)
			panic("%s: descriptor not in list", __func__);
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == NULL)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = NULL;
	d->bd_bif = NULL;
}

static int
doinit(void)
{

	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);

	LIST_INIT(&bpf_list);

	bpf_gstats.bs_recv = 0;
	bpf_gstats.bs_drop = 0;
	bpf_gstats.bs_capt = 0;

	return 0;
}

/*
 * bpfilterattach() is called at boot time.
 */
/* ARGSUSED */
void
bpfilterattach(int n)
{
	static ONCE_DECL(control);

	RUN_ONCE(&control, doinit);
}
/*
 * Open the bpf device.  Each open() clones a fresh descriptor.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct bpf_d *d;
	struct file *fp;
	int error, fd;

	/* falloc() will use the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO);
	d->bd_bufsize = bpf_bufsize;
	d->bd_seesent = 1;
	d->bd_feedback = 0;
	d->bd_pid = l->l_proc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
#endif
	getnanotime(&d->bd_btime);
	d->bd_atime = d->bd_mtime = d->bd_btime;
	callout_init(&d->bd_callout, 0);
	selinit(&d->bd_sel);
	d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d);
	d->bd_jitcode = NULL;

	mutex_enter(&bpf_mtx);
	LIST_INSERT_HEAD(&bpf_list, d, bd_list);
	mutex_exit(&bpf_mtx);

	return fd_clone(fp, fd, flag, &bpf_fileops, d);
}
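
/*
 * Example (userland sketch, not part of this file): because bpfopen()
 * clones a fresh descriptor per open(), a capture program simply opens
 * the device node once.  The path /dev/bpf assumes a cloning device
 * node; older setups may instead need to probe /dev/bpf0, /dev/bpf1, ...
 *
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	int
 *	open_bpf(void)
 *	{
 *		int fd = open("/dev/bpf", O_RDWR);
 *		if (fd == -1)
 *			err(1, "open /dev/bpf");
 *		return fd;
 *	}
 */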

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpf_close(struct file *fp)
{
	struct bpf_d *d = fp->f_bpf;
	int s;

	KERNEL_LOCK(1, NULL);

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	d->bd_pid = curproc->p_pid;

	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	mutex_enter(&bpf_mtx);
	LIST_REMOVE(d, bd_list);
	mutex_exit(&bpf_mtx);
	callout_destroy(&d->bd_callout);
	seldestroy(&d->bd_sel);
	softint_disestablish(d->bd_sih);
	free(d, M_DEVBUF);
	fp->f_bpf = NULL;

	KERNEL_UNLOCK_ONE(NULL);

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;

/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpf_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	int timed_out;
	int error;
	int s;

	getnanotime(&d->bd_atime);
	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (fp->f_flag & FNONBLOCK) {
			if (d->bd_slen == 0) {
				splx(s);
				KERNEL_UNLOCK_ONE(NULL);
				return (EWOULDBLOCK);
			}
			ROTATE_BUFFERS(d);
			break;
		}

		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read or while we were asleep.  Rotate the
			 * buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = tsleep(d, PRINET|PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			KERNEL_UNLOCK_ONE(NULL);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				KERNEL_UNLOCK_ONE(NULL);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (error != 0)
			goto done;
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splnet();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
done:
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);
	return (error);
}
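
/*
 * Example (userland sketch): bpf_read() insists that the read size equal
 * the kernel buffer size, so a consumer queries BIOCGBLEN, reads exactly
 * that many bytes, and walks the returned records using bh_hdrlen,
 * bh_caplen and BPF_WORDALIGN(), mirroring how catchpacket() lays them
 * out.  handle_packet() is a hypothetical consumer.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <net/bpf.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	extern void handle_packet(const char *, size_t);
 *
 *	void
 *	read_loop(int fd)
 *	{
 *		u_int blen;
 *		char *buf, *p;
 *		ssize_t n;
 *
 *		if (ioctl(fd, BIOCGBLEN, &blen) == -1)
 *			return;
 *		if ((buf = malloc(blen)) == NULL)
 *			return;
 *		while ((n = read(fd, buf, blen)) > 0) {
 *			for (p = buf; p < buf + n;
 *			    p += BPF_WORDALIGN(((struct bpf_hdr *)p)->bh_hdrlen +
 *			    ((struct bpf_hdr *)p)->bh_caplen)) {
 *				struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *				handle_packet(p + bh->bh_hdrlen, bh->bh_caplen);
 *			}
 *		}
 *		free(buf);
 *	}
 */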


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(struct bpf_d *d)
{
	wakeup(d);
	if (d->bd_async)
		softint_schedule(d->bd_sih);
	selnotify(&d->bd_sel, 0, 0);
}

static void
bpf_softintr(void *cookie)
{
	struct bpf_d *d;

	d = cookie;
	if (d->bd_async)
		fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = arg;
	int s;

	s = splnet();
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	splx(s);
}


static int
bpf_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	int error, s;
	static struct sockaddr_storage dst;

	m = NULL;	/* XXX gcc */

	KERNEL_LOCK(1, NULL);

	if (d->bd_bif == NULL) {
		KERNEL_UNLOCK_ONE(NULL);
		return (ENXIO);
	}
	getnanotime(&d->bd_mtime);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return (0);
	}

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m,
	    (struct sockaddr *) &dst);
	if (error) {
		KERNEL_UNLOCK_ONE(NULL);
		return (error);
	}

	if (m->m_pkthdr.len > ifp->if_mtu) {
		KERNEL_UNLOCK_ONE(NULL);
		m_freem(m);
		return (EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.ss_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, 0, M_COPYALL, M_NOWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (1 /*d->bd_direction == BPF_D_INOUT*/)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	s = splsoftnet();
	error = (*ifp->if_output)(ifp, m, (struct sockaddr *) &dst, NULL);

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		m_freem(mc);
	}
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}
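
/*
 * Example (userland sketch): a write() on the descriptor injects one
 * packet through bpf_movein() and the interface's if_output routine.
 * Setting BIOCSHDRCMPLT first makes the code above tag the destination
 * with pseudo_AF_HDRCMPLT, so the written bytes are treated as a
 * complete link-level frame (the source address is not rewritten).
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <net/bpf.h>
 *	#include <unistd.h>
 *
 *	int
 *	inject(int fd, const void *frame, size_t len)
 *	{
 *		u_int one = 1;
 *
 *		if (ioctl(fd, BIOCSHDRCMPLT, &one) == -1)
 *			return -1;
 *		return write(fd, frame, len) == (ssize_t)len ? 0 : -1;
 *	}
 */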

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splnet.
 */
static void
reset_d(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_ccount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set link layer read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 *  BIOCSFEEDBACK	Set packet feedback mode.
 *  BIOCGFEEDBACK	Get packet feedback mode.
 *  BIOCGSEESENT	Get "see sent packets" mode.
 *  BIOCSSEESENT	Set "see sent packets" mode.
 */
/* ARGSUSED */
static int
bpf_ioctl(struct file *fp, u_long cmd, void *addr)
{
	struct bpf_d *d = fp->f_bpf;
	int s, error = 0;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	KERNEL_LOCK(1, NULL);
	d->bd_pid = curproc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
	else
		d->bd_compat32 = 0;
#endif

	s = splnet();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	splx(s);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splnet();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splnet();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splnet();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported device parameters.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, addr);
		break;

	/*
	 * Set device parameters.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;
	/*
	 * Get interface name.
	 */
#ifdef OBIOCGETIF
	case OBIOCGETIF:
#endif
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, addr);
		break;

	/*
	 * Set interface.
	 */
#ifdef OBIOCSETIF
	case OBIOCSETIF:
#endif
	case BIOCSETIF:
		error = bpf_setif(d, addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}

#ifdef BIOCGORTIMEOUT
	/*
	 * Get read timeout.
	 */
	case BIOCGORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}
#endif

#ifdef BIOCSORTIMEOUT
	/*
	 * Set read timeout.
	 */
	case BIOCSORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}
#endif

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			bs->bs_capt = d->bd_ccount;
			break;
		}

	case BIOCGSTATSOLD:
		{
			struct bpf_stat_old *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	/*
	 * Set "feed packets from bpf back to input" mode
	 */
	case BIOCSFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	/*
	 * Get "feed packets from bpf back to input" mode
	 */
	case BIOCGFEEDBACK:
		*(u_int *)addr = d->bd_feedback;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		/*
		 * No need to do anything special: bpf_read() checks
		 * FNONBLOCK on the file to decide whether or not to
		 * block the read.
		 */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case TIOCSPGRP:		/* Process or group to send signals to */
	case FIOSETOWN:
		error = fsetown(&d->bd_pgid, cmd, addr);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(d->bd_pgid, cmd, addr);
		break;
	}
	KERNEL_UNLOCK_ONE(NULL);
	return (error);
}
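
/*
 * Example (userland sketch): a typical capture setup issues a handful of
 * the ioctls handled above, in this order: pick a buffer size before
 * BIOCSETIF (BIOCSBLEN fails with EINVAL once an interface is attached),
 * bind the interface, then enable immediate and promiscuous modes.  The
 * interface name "wm0" is only an example.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <string.h>
 *
 *	int
 *	setup_capture(int fd)
 *	{
 *		struct ifreq ifr;
 *		u_int blen = 65536, on = 1;
 *
 *		if (ioctl(fd, BIOCSBLEN, &blen) == -1)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strlcpy(ifr.ifr_name, "wm0", sizeof(ifr.ifr_name));
 *		if (ioctl(fd, BIOCSETIF, &ifr) == -1)
 *			return -1;
 *		if (ioctl(fd, BIOCIMMEDIATE, &on) == -1)
 *			return -1;
 *		return ioctl(fd, BIOCPROMISC, NULL);
 *	}
 */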

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(struct bpf_d *d, struct bpf_program *fp)
{
	struct bpf_insn *fcode, *old;
	bpfjit_func_t jcode, oldj;
	size_t flen, size;
	int s;

	jcode = NULL;
	flen = fp->bf_len;

	if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) {
		return EINVAL;
	}

	if (flen) {
		/*
		 * Allocate the buffer, copy the byte-code from
		 * userspace and validate it.
		 */
		size = flen * sizeof(*fp->bf_insns);
		fcode = malloc(size, M_DEVBUF, M_WAITOK);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    !bpf_validate(fcode, (int)flen)) {
			free(fcode, M_DEVBUF);
			return EINVAL;
		}
		membar_consumer();
		if (bpf_jit)
			jcode = bpf_jit_generate(NULL, fcode, flen);
	} else {
		fcode = NULL;
	}

	s = splnet();
	old = d->bd_filter;
	d->bd_filter = fcode;
	oldj = d->bd_jitcode;
	d->bd_jitcode = jcode;
	reset_d(d);
	splx(s);

	if (old) {
		free(old, M_DEVBUF);
	}
	if (oldj) {
		bpf_jit_freecode(oldj);
	}

	return 0;
}
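
/*
 * Example (userland sketch): BIOCSETF hands bpf_setf() a bpf_program
 * whose instructions are validated with bpf_validate() before use.  A
 * minimal classic-BPF program that accepts ARP frames on Ethernet loads
 * the 16-bit ethertype at offset 12 and compares it with 0x0806
 * (ETHERTYPE_ARP); a nonzero return value is the snap length, zero
 * rejects the packet.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <net/bpf.h>
 *
 *	static struct bpf_insn arp_insns[] = {
 *		BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x0806, 0, 1),
 *		BPF_STMT(BPF_RET + BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET + BPF_K, 0),
 *	};
 *
 *	int
 *	set_arp_filter(int fd)
 *	{
 *		struct bpf_program prog = {
 *			.bf_len = sizeof(arp_insns) / sizeof(arp_insns[0]),
 *			.bf_insns = arp_insns,
 *		};
 *		return ioctl(fd, BIOCSETF, &prog);
 *	}
 */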

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	char *cp;
	int unit_seen, i, s, error;

	/*
	 * Make sure the provided name has a unit number, and default
	 * it to '0' if not specified.
	 * XXX This is ugly ... do this differently?
	 */
	unit_seen = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
	while (*cp++)
		if (*cp >= '0' && *cp <= '9')
			unit_seen = 1;
	if (!unit_seen) {
		/* Make sure to leave room for the '\0'. */
		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
			if ((ifr->ifr_name[i] >= 'a' &&
			     ifr->ifr_name[i] <= 'z') ||
			    (ifr->ifr_name[i] >= 'A' &&
			     ifr->ifr_name[i] <= 'Z'))
				continue;
			ifr->ifr_name[i] = '0';
		}
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL ||
		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to the requested interface,
		 * just flush the buffer.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splnet();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Copy the interface name to the ifreq.
 */
static void
bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
{
	memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
}

static int
bpf_stat(struct file *fp, struct stat *st)
{
	struct bpf_d *d = fp->f_bpf;

	(void)memset(st, 0, sizeof(*st));
	KERNEL_LOCK(1, NULL);
	st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid);
	st->st_atimespec = d->bd_atime;
	st->st_mtimespec = d->bd_mtime;
	st->st_ctimespec = st->st_birthtimespec = d->bd_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	st->st_mode = S_IFCHR;
	KERNEL_UNLOCK_ONE(NULL);
	return 0;
}

/*
 * Support for poll() system call
 *
 * Return true iff the specific operation will not block indefinitely - with
 * the assumption that it is safe to positively acknowledge a request for the
 * ability to write to the BPF device.
 * Otherwise, return false but make a note that a selnotify() must be done.
 */
static int
bpf_poll(struct file *fp, int events)
{
	struct bpf_d *d = fp->f_bpf;
	int s = splnet();
	int revents;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	KERNEL_LOCK(1, NULL);
	d->bd_pid = curproc->p_pid;

	revents = events & (POLLOUT | POLLWRNORM);
	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 */
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		     d->bd_slen != 0)) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &d->bd_sel);
			/* Start the read timeout if necessary */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}

	KERNEL_UNLOCK_ONE(NULL);
	splx(s);
	return (revents);
}
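
/*
 * Example (userland sketch): polling a bpf descriptor behaves as
 * bpf_poll() above implies: POLLIN is reported once the hold buffer has
 * data, or, in immediate/timed-out mode, once the store buffer does;
 * otherwise the caller is recorded with selrecord() and the read timeout
 * (if any) is started.
 *
 *	#include <poll.h>
 *
 *	int
 *	wait_readable(int fd, int timo_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		return poll(&pfd, 1, timo_ms);
 *	}
 */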

static void
filt_bpfrdetach(struct knote *kn)
{
	struct bpf_d *d = kn->kn_hook;
	int s;

	KERNEL_LOCK(1, NULL);
	s = splnet();
	SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = kn->kn_hook;
	int rv;

	KERNEL_LOCK(1, NULL);
	kn->kn_data = d->bd_hlen;
	if (d->bd_immediate)
		kn->kn_data += d->bd_slen;
	rv = (kn->kn_data > 0);
	KERNEL_UNLOCK_ONE(NULL);
	return rv;
}

static const struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfrdetach, filt_bpfread };

static int
bpf_kqfilter(struct file *fp, struct knote *kn)
{
	struct bpf_d *d = fp->f_bpf;
	struct klist *klist;
	int s;

	KERNEL_LOCK(1, NULL);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &d->bd_sel.sel_klist;
		kn->kn_fop = &bpfread_filtops;
		break;

	default:
		KERNEL_UNLOCK_ONE(NULL);
		return (EINVAL);
	}

	kn->kn_hook = d;

	s = splnet();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);
	KERNEL_UNLOCK_ONE(NULL);

	return (0);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void *
bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcpy");
		count = min(m->m_len, len);
		memcpy(dst, mtod(m, const void *), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
	return dst_arg;
}

/*
 * Dispatch a packet to all the listeners on interface bp.
 *
 * pkt		pointer to the packet, either a data buffer or an mbuf chain
 * buflen	buffer length, if pkt is a data buffer
 * cpfn		a function that can copy pkt into the listener's buffer
 * pktlen	length of the packet
 * rcv		true if packet came in
 */
static inline void
bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
    void *pkt, u_int pktlen, u_int buflen, const bool rcv)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = (const uint8_t *)pkt,
		.wirelen = pktlen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bool gottime = false;
	struct timespec ts;

	/*
	 * Note that the IPL does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	for (struct bpf_d *d = bp->bif_dlist; d != NULL; d = d->bd_next) {
		u_int slen;

		if (!d->bd_seesent && !rcv) {
			continue;
		}
		d->bd_rcount++;
		bpf_gstats.bs_recv++;

		if (d->bd_jitcode)
			slen = d->bd_jitcode(NULL, &args);
		else
			slen = bpf_filter_ext(NULL, d->bd_filter, &args);

		if (!slen) {
			continue;
		}
		if (!gottime) {
			gottime = true;
			nanotime(&ts);
		}
		catchpacket(d, pkt, pktlen, slen, cpfn, &ts);
	}
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
static void
_bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{

	bpf_deliver(bp, memcpy, pkt, pktlen, pktlen, true);
}

/*
 * Incoming linkage from device drivers, when the head of the packet is in
 * a buffer, and the tail is in an mbuf chain.
 */
static void
_bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	u_int pktlen;
	struct mbuf mb;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m) + dlen;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	(void)memset(&mb, 0, sizeof(mb));
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;

	bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif != NULL);
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
static void
_bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	void *(*cpfn)(void *, const void *, size_t);
	u_int pktlen, buflen;
	void *marg;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m);

	if (pktlen == m->m_len) {
		cpfn = (void *)memcpy;
		marg = mtod(m, void *);
		buflen = pktlen;
	} else {
		cpfn = bpf_mcpy;
		marg = m;
		buflen = 0;
	}

	bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif != NULL);
}

/*
 * We need to prepend the address family as
 * a four byte field.  Cons up a dummy header
 * to pacify bpf.  This is safe because bpf
 * will only read from the mbuf (i.e., it won't
 * try to free it or keep a pointer to it).
 */
static void
_bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m)
{
	struct mbuf m0;

	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_len = 4;
	m0.m_data = (char *)&af;

	_bpf_mtap(bp, &m0);
}
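
/*
 * Example (userland sketch): captures taken through _bpf_mtap_af() carry
 * the address family as the first four bytes of each packet, in host
 * byte order.  A consumer of such a tap (e.g. DLT_NULL on loopback)
 * peels it off like this; handle_ip() is a hypothetical consumer.
 *
 *	#include <sys/socket.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	extern void handle_ip(const char *, size_t);
 *
 *	void
 *	handle_af_packet(const char *pkt, size_t caplen)
 *	{
 *		uint32_t af;
 *
 *		if (caplen < sizeof(af))
 *			return;
 *		memcpy(&af, pkt, sizeof(af));
 *		if (af == AF_INET)
 *			handle_ip(pkt + sizeof(af), caplen - sizeof(af));
 *	}
 */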

/*
 * Put the SLIP pseudo-"link header" in place.
 * Note this M_PREPEND() should never fail,
 * since we know we always have enough space
 * in the input buffer.
 */
static void
_bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m)
{
	int s;
	u_char *hp;

	M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
	if (*m == NULL)
		return;

	hp = mtod(*m, u_char *);
	hp[SLX_DIR] = SLIPDIR_IN;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	s = splnet();
	_bpf_mtap(bp, *m);
	splx(s);

	m_adj(*m, SLIP_HDRLEN);
}

/*
 * Put the SLIP pseudo-"link header" in
 * place.  The compressed header is now
 * at the beginning of the mbuf.
 */
static void
_bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m)
{
	struct mbuf m0;
	u_char *hp;
	int s;

	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_data = m0.m_dat;
	m0.m_len = SLIP_HDRLEN;

	hp = mtod(&m0, u_char *);

	hp[SLX_DIR] = SLIPDIR_OUT;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	s = splnet();
	_bpf_mtap(bp, &m0);
	splx(s);
	m_freem(m);
}

static int
bpf_hdrlen(struct bpf_d *d)
{
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
#ifdef _LP64
	if (d->bd_compat32)
		return (BPF_WORDALIGN32(hdrlen + SIZEOF_BPF_HDR32) - hdrlen);
	else
#endif
		return (BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen);
}
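
/*
 * Worked example for the computation above (illustrative figures: the
 * traditional 18-byte SIZEOF_BPF_HDR, DLT_EN10MB's 14-byte link header
 * and 4-byte longwords; the real values depend on the ABI):
 *
 *	hdrlen = 14
 *	BPF_WORDALIGN(14 + 18) = 32, so bpf_hdrlen() returns 32 - 14 = 18
 *
 * A captured record then consists of an 18-byte bpf header followed by
 * the 14-byte Ethernet header ending at offset 32, which leaves the
 * network layer header starting on a longword boundary.
 */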

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Call the wakeup functions if it's time to wakeup
 * a listener (buffer full), "cpfn" is the routine called to do the
 * actual data transfer.  memcpy is passed in to copy contiguous chunks,
 * while bpf_mcpy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void *(*cpfn)(void *, const void *, size_t), struct timespec *ts)
{
	char *h;
	int totlen, curlen, caplen;
	int hdrlen = bpf_hdrlen(d);
	int do_wakeup = 0;

	++d->bd_ccount;
	++bpf_gstats.bs_capt;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;
	/*
	 * If we adjusted totlen to fit the bufsize, it could be that
	 * totlen is smaller than hdrlen because of the link layer header.
	 */
	caplen = totlen - hdrlen;
	if (caplen < 0)
		caplen = 0;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
#ifdef _LP64
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			++bpf_gstats.bs_drop;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	h = (char *)d->bd_sbuf + curlen;
#ifdef _LP64
	if (d->bd_compat32) {
		struct bpf_hdr32 *hp32;

		hp32 = (struct bpf_hdr32 *)h;
		hp32->bh_tstamp.tv_sec = ts->tv_sec;
		hp32->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
		hp32->bh_datalen = pktlen;
		hp32->bh_hdrlen = hdrlen;
		hp32->bh_caplen = caplen;
	} else
#endif
	{
		struct bpf_hdr *hp;

		hp = (struct bpf_hdr *)h;
		hp->bh_tstamp.tv_sec = ts->tv_sec;
		hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
		hp->bh_datalen = pktlen;
		hp->bh_hdrlen = hdrlen;
		hp->bh_caplen = caplen;
	}

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(h + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;

	/*
	 * Call bpf_wakeup after bd_slen has been updated so that kevent(2)
	 * will cause filt_bpfread() to be called with it adjusted.
	 */
	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{

	d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL);
	if (!d->bd_fbuf)
		return (ENOBUFS);
	d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL);
	if (!d->bd_sbuf) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != NULL)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != NULL)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free(d->bd_filter, M_DEVBUF);

	if (d->bd_jitcode != NULL) {
		bpf_jit_freecode(d->bd_jitcode);
	}
}

/*
 * Attach an interface to bpf.  dlt is the link layer type;
 * hdrlen is the fixed size of the link header for the specified dlt
 * (variable length headers not yet supported).
 */
static void
_bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == NULL)
		panic("bpfattach");

	bp->bif_dlist = NULL;
	bp->bif_driverp = driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = NULL;

	bp->bif_hdrlen = hdrlen;
#if 0
	printf("bpf: %s attached\n", ifp->if_xname);
#endif
}

/*
 * Remove an interface from bpf.
 */
static void
_bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, **pbp;
	struct bpf_d *d;
	int s;

	/* Nuke the vnodes for any open instances */
	LIST_FOREACH(d, &bpf_list, bd_list) {
		if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
			/*
			 * Detach the descriptor from an interface now.
			 * It will be free'ed later by close routine.
			 */
			s = splnet();
			d->bd_promisc = 0;	/* we can't touch device. */
			bpf_detachd(d);
			splx(s);
		}
	}

again:
	for (bp = bpf_iflist, pbp = &bpf_iflist;
	     bp != NULL; pbp = &bp->bif_next, bp = bp->bif_next) {
		if (bp->bif_ifp == ifp) {
			*pbp = bp->bif_next;
			free(bp, M_DEVBUF);
			goto again;
		}
	}
}

/*
 * Change the data link type of an interface.
 */
static void
_bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	struct bpf_if *bp;

	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_driverp == &ifp->if_bpf)
			break;
	}
	if (bp == NULL)
		panic("bpf_change_type");

	bp->bif_dlt = dlt;

	bp->bif_hdrlen = hdrlen;
}

/*
 * Get a list of the data link types available on the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len)
				return ENOMEM;
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	bfl->bfl_len = n;
	return error;
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int s, error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return 0;
	ifp = d->bd_bif->bif_ifp;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp == NULL)
		return EINVAL;
	s = splnet();
	opromisc = d->bd_promisc;
	bpf_detachd(d);
	bpf_attachd(d, bp);
	reset_d(d);
	if (opromisc) {
		error = ifpromisc(bp->bif_ifp, 1);
		if (error)
			printf("%s: bpf_setdlt: ifpromisc failed (%d)\n",
			    bp->bif_ifp->if_xname, error);
		else
			d->bd_promisc = 1;
	}
	splx(s);
	return 0;
}

static int
sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS)
{
	int newsize, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &newsize;
	newsize = bpf_maxbufsize;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE)
		return (EINVAL);

	bpf_maxbufsize = newsize;

	return (0);
}
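
/*
 * Example (userland sketch): the handler above backs the writable
 * net.bpf.maxbufsize node created in sysctl_net_bpf_setup() below; it
 * can be driven from C with sysctlbyname(3):
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *
 *	int
 *	set_bpf_maxbufsize(int newsize)
 *	{
 *		return sysctlbyname("net.bpf.maxbufsize", NULL, NULL,
 *		    &newsize, sizeof(newsize));
 *	}
 */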

#if defined(MODULAR) || defined(BPFJIT)
static int
sysctl_net_bpf_jit(SYSCTLFN_ARGS)
{
	bool newval;
	int error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &newval;
	newval = bpf_jit;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	bpf_jit = newval;

	/*
	 * Do a full sync to publish new bpf_jit value and
	 * update bpfjit_module_ops.bj_generate_code variable.
	 */
	membar_sync();

	if (newval && bpfjit_module_ops.bj_generate_code == NULL) {
		printf("JIT compilation is postponed "
		    "until after bpfjit module is loaded\n");
	}

	return 0;
}
#endif

static int
sysctl_net_bpf_peers(SYSCTLFN_ARGS)
{
	int error, elem_count;
	struct bpf_d *dp;
	struct bpf_d_ext dpe;
	size_t len, needed, elem_size, out_size;
	char *sp;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 2)
		return (EINVAL);

	/* BPF peers is privileged information. */
	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE,
	    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL);
	if (error)
		return (EPERM);

	len = (oldp != NULL) ? *oldlenp : 0;
	sp = oldp;
	elem_size = name[0];
	elem_count = name[1];
	out_size = MIN(sizeof(dpe), elem_size);
	needed = 0;

	if (elem_size < 1 || elem_count < 0)
		return (EINVAL);

	mutex_enter(&bpf_mtx);
	LIST_FOREACH(dp, &bpf_list, bd_list) {
		if (len >= elem_size && elem_count > 0) {
#define BPF_EXT(field)	dpe.bde_ ## field = dp->bd_ ## field
			BPF_EXT(bufsize);
			BPF_EXT(promisc);
			BPF_EXT(state);
			BPF_EXT(immediate);
			BPF_EXT(hdrcmplt);
			BPF_EXT(seesent);
			BPF_EXT(pid);
			BPF_EXT(rcount);
			BPF_EXT(dcount);
			BPF_EXT(ccount);
#undef BPF_EXT
			if (dp->bd_bif)
				(void)strlcpy(dpe.bde_ifname,
				    dp->bd_bif->bif_ifp->if_xname,
				    IFNAMSIZ - 1);
			else
				dpe.bde_ifname[0] = '\0';

			error = copyout(&dpe, sp, out_size);
			if (error)
				break;
			sp += elem_size;
			len -= elem_size;
		}
		needed += elem_size;
		if (elem_count > 0 && elem_count != INT_MAX)
			elem_count--;
	}
	mutex_exit(&bpf_mtx);

	*oldlenp = needed;

	return (error);
}

static struct sysctllog *bpf_sysctllog;
static void
sysctl_net_bpf_setup(void)
{
	const struct sysctlnode *node;

	node = NULL;
	sysctl_createv(&bpf_sysctllog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "bpf",
	    SYSCTL_DESCR("BPF options"),
	    NULL, 0, NULL, 0,
	    CTL_NET, CTL_CREATE, CTL_EOL);
	if (node != NULL) {
#if defined(MODULAR) || defined(BPFJIT)
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "jit",
		    SYSCTL_DESCR("Toggle Just-In-Time compilation"),
		    sysctl_net_bpf_jit, 0, &bpf_jit, 0,
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
#endif
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		    CTLTYPE_INT, "maxbufsize",
		    SYSCTL_DESCR("Maximum size for data capture buffer"),
		    sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0,
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT,
		    CTLTYPE_STRUCT, "stats",
		    SYSCTL_DESCR("BPF stats"),
		    NULL, 0, &bpf_gstats, sizeof(bpf_gstats),
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
		    CTLFLAG_PERMANENT,
		    CTLTYPE_STRUCT, "peers",
		    SYSCTL_DESCR("BPF peers"),
		    sysctl_net_bpf_peers, 0, NULL, 0,
		    CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}
}

struct bpf_ops bpf_ops_kernel = {
	.bpf_attach =		_bpfattach,
	.bpf_detach =		_bpfdetach,
	.bpf_change_type =	_bpf_change_type,

	.bpf_tap =		_bpf_tap,
	.bpf_mtap =		_bpf_mtap,
	.bpf_mtap2 =		_bpf_mtap2,
	.bpf_mtap_af =		_bpf_mtap_af,
	.bpf_mtap_sl_in =	_bpf_mtap_sl_in,
	.bpf_mtap_sl_out =	_bpf_mtap_sl_out,
};

MODULE(MODULE_CLASS_DRIVER, bpf, NULL);

static int
bpf_modcmd(modcmd_t cmd, void *arg)
{
	devmajor_t bmajor, cmajor;
	int error;

	bmajor = cmajor = NODEVMAJOR;

	switch (cmd) {
	case MODULE_CMD_INIT:
		bpfilterattach(0);
		error = devsw_attach("bpf", NULL, &bmajor,
		    &bpf_cdevsw, &cmajor);
		if (error == EEXIST)
			error = 0; /* maybe built-in ... improve eventually */
		if (error)
			break;

		bpf_ops_handover_enter(&bpf_ops_kernel);
		atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
		bpf_ops_handover_exit();
		sysctl_net_bpf_setup();
		break;

	case MODULE_CMD_FINI:
		/*
		 * While there is no reference counting for bpf callers,
		 * unload could at least in theory be done similarly to
		 * system call disestablishment.  This should even be
		 * a little simpler:
		 *
		 * 1) replace op vector with stubs
		 * 2) post update to all cpus with xc
		 * 3) check that nobody is in bpf anymore
		 *    (it's doubtful we'd want something like l_sysent,
		 *    but we could do something like *signed* percpu
		 *    counters.  if the sum is 0, we're good).
		 * 4) if fail, unroll changes
		 *
		 * NOTE: the change won't be atomic to the outside.  Some
		 * packets may not be captured even if the unload is
		 * not successful.  I think packet capture not working
		 * is a perfectly logical consequence of trying to
		 * disable packet capture.
		 */
		error = EOPNOTSUPP;
		/* insert sysctl teardown */
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}