/*	$NetBSD: bpf.c,v 1.229.2.2 2023/02/22 19:50:33 martin Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 * static char rcsid[] =
 * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.229.2.2 2023/02/22 19:50:33 martin Exp $");

#if defined(_KERNEL_OPT)
#include "opt_bpf.h"
#include "sl.h"
#include "strip.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/percpu.h>
#include <sys/pserialize.h>
#include <sys/lwp.h>

#include <net/if.h>
#include <net/slip.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/bpfjit.h>

#include <net/if_arc.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <compat/sys/sockio.h>

#ifndef BPF_BUFSIZE
/*
 * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet
 * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k).
 */
# define BPF_BUFSIZE 32768
#endif

#define PRINET	26		/* interruptible */

/*
 * The default read buffer size, and limit for BIOCSBLEN, is sysctl'able.
 * XXX the default values should be computed dynamically based
 * on available memory size and available mbuf clusters.
 */
static int bpf_bufsize = BPF_BUFSIZE;
static int bpf_maxbufsize = BPF_DFLTBUFSIZE;	/* XXX set dynamically, see above */
static bool bpf_jit = false;

struct bpfjit_ops bpfjit_module_ops = {
	.bj_generate_code = NULL,
	.bj_free_code = NULL
};
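
/*
 * These hooks start out NULL and are filled in when the bpfjit module
 * (if built) is loaded; bpf_jit_generate() below pairs its
 * membar_consumer() with the producer-side barrier used when the
 * pointers are published.
 */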

/*
 * Global BPF statistics returned by net.bpf.stats sysctl.
 */
static struct percpu *bpf_gstats_percpu;	/* struct bpf_stat */

#define BPF_STATINC(id)					\
{							\
	struct bpf_stat *__stats =			\
	    percpu_getref(bpf_gstats_percpu);		\
	__stats->bs_##id++;				\
	percpu_putref(bpf_gstats_percpu);		\
}

/*
 * Locking notes:
 * - bpf_mtx (adaptive mutex) protects:
 *   - Global lists: bpf_iflist and bpf_dlist
 *   - struct bpf_if
 *   - bpf_close
 *   - bpf_psz (pserialize)
 * - struct bpf_d has two mutexes:
 *   - bd_buf_mtx (spin mutex) protects the buffers that can be accessed
 *     on packet tapping
 *   - bd_mtx (adaptive mutex) protects member variables other than the
 *     buffers
 * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx
 * - The struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is
 *   never freed because struct bpf_d is only freed in bpf_close and
 *   bpf_close is never called while bpf_read or bpf_write is executing
 * - A filter assigned to a bpf_d can be replaced with another filter
 *   while packets are being tapped, so the replacement needs to be done
 *   atomically
 * - struct bpf_d is iterated on bpf_dlist with psz
 * - struct bpf_if is iterated on bpf_iflist with psz or psref
 */
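/*
 * As a concrete example of the locking order, the BIOCSDLT handler in
 * bpf_ioctl() takes bpf_mtx and then d->bd_mtx, and bpf_setf() takes
 * bpf_mtx, then d->bd_mtx, and finally (via reset_d()) d->bd_buf_mtx;
 * the reverse order is never used.
 */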
/*
 * Use a mutex to avoid a race condition between gathering the stats/peers
 * and opening/closing the device.
 */
static kmutex_t bpf_mtx;

static struct psref_class *bpf_psref_class __read_mostly;
static pserialize_t bpf_psz;

static inline void
bpf_if_acquire(struct bpf_if *bp, struct psref *psref)
{

	psref_acquire(psref, &bp->bif_psref, bpf_psref_class);
}

static inline void
bpf_if_release(struct bpf_if *bp, struct psref *psref)
{

	psref_release(psref, &bp->bif_psref, bpf_psref_class);
}

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct pslist_head bpf_iflist;
static struct pslist_head bpf_dlist;

/* Macros for bpf_d on bpf_dlist */
#define BPF_DLIST_WRITER_INSERT_HEAD(__d)				\
	PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry)
#define BPF_DLIST_READER_FOREACH(__d)					\
	PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d,		\
	    bd_bpf_dlist_entry)
#define BPF_DLIST_WRITER_FOREACH(__d)					\
	PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d,		\
	    bd_bpf_dlist_entry)
#define BPF_DLIST_ENTRY_INIT(__d)					\
	PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry)
#define BPF_DLIST_WRITER_REMOVE(__d)					\
	PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry)
#define BPF_DLIST_ENTRY_DESTROY(__d)					\
	PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry)

/* Macros for bpf_if on bpf_iflist */
#define BPF_IFLIST_WRITER_INSERT_HEAD(__bp)				\
	PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry)
#define BPF_IFLIST_READER_FOREACH(__bp)					\
	PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if,	\
	    bif_iflist_entry)
#define BPF_IFLIST_WRITER_FOREACH(__bp)					\
	PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if,	\
	    bif_iflist_entry)
#define BPF_IFLIST_WRITER_REMOVE(__bp)					\
	PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry)
#define BPF_IFLIST_ENTRY_INIT(__bp)					\
	PSLIST_ENTRY_INIT((__bp), bif_iflist_entry)
#define BPF_IFLIST_ENTRY_DESTROY(__bp)					\
	PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry)

/* Macros for bpf_d on bpf_if#bif_dlist_pslist */
#define BPFIF_DLIST_READER_FOREACH(__d, __bp)				\
	PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \
	    bd_bif_dlist_entry)
#define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d)			\
	PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d),	\
	    bd_bif_dlist_entry)
#define BPFIF_DLIST_WRITER_REMOVE(__d)					\
	PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry)
#define BPFIF_DLIST_ENTRY_INIT(__d)					\
	PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry)
#define BPFIF_DLIST_READER_EMPTY(__bp)					\
	(PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d,	\
	    bd_bif_dlist_entry) == NULL)
#define BPFIF_DLIST_WRITER_EMPTY(__bp)					\
	(PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d,	\
	    bd_bif_dlist_entry) == NULL)
#define BPFIF_DLIST_ENTRY_DESTROY(__d)					\
	PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry)

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_deliver(struct bpf_if *,
		    void *(*cpfn)(void *, const void *, size_t),
		    void *, u_int, u_int, const u_int);
static void	bpf_freed(struct bpf_d *);
static void	bpf_free_filter(struct bpf_filter *);
static void	bpf_ifname(struct ifnet *, struct ifreq *);
static void	*bpf_mcpy(void *, const void *, size_t);
static int	bpf_movein(struct uio *, int, uint64_t,
		    struct mbuf **, struct sockaddr *);
static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *);
static void	bpf_timed_out(void *);
static inline void	bpf_wakeup(struct bpf_d *);
static int	bpf_hdrlen(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void *(*)(void *, const void *, size_t), struct timespec *);
static void	reset_d(struct bpf_d *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);

static int	bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	bpf_ioctl(struct file *, u_long, void *);
static int	bpf_poll(struct file *, int);
static int	bpf_stat(struct file *, struct stat *);
static int	bpf_close(struct file *);
static int	bpf_kqfilter(struct file *, struct knote *);

static const struct fileops bpf_fileops = {
	.fo_name = "bpf",
	.fo_read = bpf_read,
	.fo_write = bpf_write,
	.fo_ioctl = bpf_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = bpf_poll,
	.fo_stat = bpf_stat,
	.fo_close = bpf_close,
	.fo_kqfilter = bpf_kqfilter,
	.fo_restart = fnullop_restart,
};

dev_type_open(bpfopen);

const struct cdevsw bpf_cdevsw = {
	.d_open = bpfopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

bpfjit_func_t
bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size)
{

	membar_consumer();
	if (bpfjit_module_ops.bj_generate_code != NULL) {
		return bpfjit_module_ops.bj_generate_code(bc, code, size);
	}
	return NULL;
}

void
bpf_jit_freecode(bpfjit_func_t jcode)
{
	KASSERT(bpfjit_module_ops.bj_free_code != NULL);
	bpfjit_module_ops.bj_free_code(jcode);
}

static int
bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp,
    struct sockaddr *sockp)
{
	struct mbuf *m;
	int error;
	size_t len;
	size_t hlen;
	size_t align;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		align = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		/* 6(dst)+6(src)+2(type) */
		hlen = sizeof(struct ether_header);
		align = 2;
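		/*
		 * The two alignment bytes make the payload that follows
		 * the 14-byte Ethernet header start on a 4-byte boundary
		 * within the mbuf.
		 */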
		break;

	case DLT_ARCNET:
		sockp->sa_family = AF_UNSPEC;
		hlen = ARC_HDRLEN;
		align = 5;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_LINK;
		/* XXX 4(FORMAC)+6(dst)+6(src) */
		hlen = 16;
		align = 0;
		break;

	case DLT_ECONET:
		sockp->sa_family = AF_UNSPEC;
		hlen = 6;
		align = 2;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * If there aren't enough bytes for a link level header or the
	 * packet length exceeds the interface mtu, return an error.
	 */
	if (len < hlen || len - hlen > mtu)
		return (EMSGSIZE);

	/*
	 * XXX Avoid complicated buffer chaining ---
	 * bail if it won't fit in a single mbuf.
	 * (Take into account possible alignment bytes)
	 */
	if (len + align > MCLBYTES)
		return (EIO);

	m = m_gethdr(M_WAIT, MT_DATA);
	m_reset_rcvif(m);
	m->m_pkthdr.len = (int)(len - hlen);
	if (len + align > MHLEN) {
		m_clget(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}

	/* Ensure the data is properly aligned */
	if (align > 0) {
		m->m_data += align;
		m->m_len -= (int)align;
	}

	error = uiomove(mtod(m, void *), len, uio);
	if (error)
		goto bad;
	if (hlen != 0) {
		memcpy(sockp->sa_data, mtod(m, void *), hlen);
		m->m_data += hlen; /* XXX */
		len -= hlen;
	}
	m->m_len = (int)len;
	*mp = m;
	return (0);

bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{

	KASSERT(mutex_owned(&bpf_mtx));
	KASSERT(mutex_owned(d->bd_mtx));
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d);

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_if *bp;

	KASSERT(mutex_owned(&bpf_mtx));
	KASSERT(mutex_owned(d->bd_mtx));

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error __diagused;

		d->bd_promisc = 0;
		/*
		 * Take device out of promiscuous mode.  Since we were
		 * able to enter promiscuous mode, we should be able
		 * to turn it off.  But we can get an error if
		 * the interface was configured down, so we just log
		 * an unexpected error instead of panicking.
		 */
		KERNEL_LOCK_UNLESS_NET_MPSAFE();
		error = ifpromisc(bp->bif_ifp, 0);
		KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#ifdef DIAGNOSTIC
		if (error)
			printf("%s: ifpromisc failed: %d", __func__, error);
#endif
	}

	/* Remove d from the interface's descriptor list. */
	BPFIF_DLIST_WRITER_REMOVE(d);

	pserialize_perform(bpf_psz);

	if (BPFIF_DLIST_WRITER_EMPTY(bp)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = NULL;
	}
	d->bd_bif = NULL;
}

static void
bpf_init(void)
{

	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
	bpf_psz = pserialize_create();
	bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET);

	PSLIST_INIT(&bpf_iflist);
	PSLIST_INIT(&bpf_dlist);

	bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat));

	return;
}

/*
 * bpfilterattach() is called at boot time.  We don't need to do anything
 * here, since any initialization will happen as part of module init code.
 */
/* ARGSUSED */
void
bpfilterattach(int n)
{

}

/*
 * Open ethernet device. Clones.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct bpf_d *d;
	struct file *fp;
	int error, fd;

	/* falloc() will fill in the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	d = kmem_zalloc(sizeof(*d), KM_SLEEP);
	d->bd_bufsize = bpf_bufsize;
	d->bd_direction = BPF_D_INOUT;
	d->bd_feedback = 0;
	d->bd_pid = l->l_proc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
#endif
	getnanotime(&d->bd_btime);
	d->bd_atime = d->bd_mtime = d->bd_btime;
	callout_init(&d->bd_callout, CALLOUT_MPSAFE);
	selinit(&d->bd_sel);
	d->bd_jitcode = NULL;
	d->bd_filter = NULL;
	BPF_DLIST_ENTRY_INIT(d);
	BPFIF_DLIST_ENTRY_INIT(d);
	d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	cv_init(&d->bd_cv, "bpf");

	mutex_enter(&bpf_mtx);
	BPF_DLIST_WRITER_INSERT_HEAD(d);
	mutex_exit(&bpf_mtx);

	return fd_clone(fp, fd, flag, &bpf_fileops, d);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpf_close(struct file *fp)
{
	struct bpf_d *d;

	mutex_enter(&bpf_mtx);

	if ((d = fp->f_bpf) == NULL) {
		mutex_exit(&bpf_mtx);
		return 0;
	}

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	d->bd_pid = curproc->p_pid;

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING)
		callout_halt(&d->bd_callout, d->bd_mtx);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif)
		bpf_detachd(d);
	mutex_exit(d->bd_mtx);

	BPF_DLIST_WRITER_REMOVE(d);

	pserialize_perform(bpf_psz);
	mutex_exit(&bpf_mtx);

	BPFIF_DLIST_ENTRY_DESTROY(d);
	BPF_DLIST_ENTRY_DESTROY(d);
	fp->f_bpf = NULL;
	bpf_freed(d);
	callout_destroy(&d->bd_callout);
	seldestroy(&d->bd_sel);
	mutex_obj_free(d->bd_mtx);
	mutex_obj_free(d->bd_buf_mtx);
	cv_destroy(&d->bd_cv);

	kmem_free(d, sizeof(*d));

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
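/*
 * Both users of ROTATE_BUFFERS(), bpf_read() and catchpacket(), invoke
 * it with d->bd_buf_mtx held; that is what makes the three-buffer
 * shuffle safe against concurrent tapping.
 */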
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpf_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	int timed_out;
	int error;

	getnanotime(&d->bd_atime);
	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING)
		callout_halt(&d->bd_callout, d->bd_mtx);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	mutex_exit(d->bd_mtx);
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	mutex_enter(d->bd_buf_mtx);
	while (d->bd_hbuf == NULL) {
		if (fp->f_flag & FNONBLOCK) {
			if (d->bd_slen == 0) {
				error = EWOULDBLOCK;
				goto out;
			}
			ROTATE_BUFFERS(d);
			break;
		}

		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read, or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout);

		if (error == EINTR || error == ERESTART)
			goto out;

		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				error = 0;
				goto out;
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (error != 0)
			goto out;
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	mutex_exit(d->bd_buf_mtx);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	mutex_enter(d->bd_buf_mtx);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
out:
	mutex_exit(d->bd_buf_mtx);
	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(struct bpf_d *d)
{

	mutex_enter(d->bd_buf_mtx);
	cv_broadcast(&d->bd_cv);
	mutex_exit(d->bd_buf_mtx);

	if (d->bd_async)
		fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL);
	selnotify(&d->bd_sel, 0, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = arg;

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	mutex_exit(d->bd_mtx);
}

static int
bpf_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct bpf_d *d = fp->f_bpf;
	struct bpf_if *bp;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	int error;
	static struct sockaddr_storage dst;
	struct psref psref;
	int bound;

	m = NULL;	/* XXX gcc */

	bound = curlwp_bind();
	mutex_enter(d->bd_mtx);
	bp = d->bd_bif;
	if (bp == NULL) {
		mutex_exit(d->bd_mtx);
		error = ENXIO;
		goto out_bindx;
	}
	bpf_if_acquire(bp, &psref);
	mutex_exit(d->bd_mtx);

	getnanotime(&d->bd_mtime);

	ifp = bp->bif_ifp;
	if (if_is_deactivated(ifp)) {
		error = ENXIO;
		goto out;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}

	error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m,
	    (struct sockaddr *) &dst);
	if (error)
		goto out;

	if (m->m_pkthdr.len > ifp->if_mtu) {
		m_freem(m);
		error = EMSGSIZE;
		goto out;
	}

	if (d->bd_hdrcmplt)
		dst.ss_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, 0, M_COPYALL, M_NOWAIT);
		if (mc != NULL)
			m_set_rcvif(mc, ifp);
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (1 /*d->bd_direction == BPF_D_INOUT*/)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	error = if_output_lock(ifp, ifp, m, (struct sockaddr *) &dst, NULL);

	if (mc != NULL) {
		if (error == 0) {
			int s = splsoftnet();
			KERNEL_LOCK_UNLESS_IFP_MPSAFE(ifp);
			ifp->_if_input(ifp, mc);
			KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(ifp);
			splx(s);
		} else
			m_freem(mc);
	}
	/*
	 * The driver frees the mbuf.
	 */
out:
	bpf_if_release(bp, &psref);
out_bindx:
	curlwp_bindx(bound);
	return error;
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.
 */
static void
reset_d(struct bpf_d *d)
{

	KASSERT(mutex_owned(d->bd_mtx));

	mutex_enter(d->bd_buf_mtx);
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_ccount = 0;
	mutex_exit(d->bd_buf_mtx);
}

/*
 *  FIONREAD		Check for read packet available.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 *  BIOCSFEEDBACK	Set packet feedback mode.
 *  BIOCGFEEDBACK	Get packet feedback mode.
 *  BIOCGDIRECTION	Get packet direction flag.
 *  BIOCSDIRECTION	Set packet direction flag.
 */
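/*
 * Typical userland sequence (an illustrative sketch only, not part of
 * this file; "wm0" is just an example interface name):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "wm0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		(attach to the interface)
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);		(learn the kernel buffer size)
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);	(must pass exactly blen;
 *						 see bpf_read() above)
 */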
/* ARGSUSED */
static int
bpf_ioctl(struct file *fp, u_long cmd, void *addr)
{
	struct bpf_d *d = fp->f_bpf;
	int error = 0;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	d->bd_pid = curproc->p_pid;
#ifdef _LP64
	if (curproc->p_flag & PK_32)
		d->bd_compat32 = 1;
	else
		d->bd_compat32 = 0;
#endif

	mutex_enter(d->bd_mtx);
	if (d->bd_state == BPF_WAITING)
		callout_halt(&d->bd_callout, d->bd_mtx);
	d->bd_state = BPF_IDLE;
	mutex_exit(d->bd_mtx);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			mutex_enter(d->bd_buf_mtx);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			mutex_exit(d->bd_buf_mtx);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		/*
		 * Forbid changing the buffer length if buffers are already
		 * allocated.
		 */
		mutex_enter(d->bd_mtx);
		mutex_enter(d->bd_buf_mtx);
		if (d->bd_bif != NULL || d->bd_sbuf != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		mutex_exit(d->bd_buf_mtx);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		mutex_enter(d->bd_mtx);
		reset_d(d);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL) {
			mutex_exit(d->bd_mtx);
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			KERNEL_LOCK_UNLESS_NET_MPSAFE();
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
			if (error == 0)
				d->bd_promisc = 1;
		}
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Get a list of supported device parameters.
	 */
	case BIOCGDLTLIST:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, addr);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Set device parameters.
	 */
	case BIOCSDLT:
		mutex_enter(&bpf_mtx);
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		mutex_exit(d->bd_mtx);
		mutex_exit(&bpf_mtx);
		break;

	/*
	 * Get interface name.
	 */
#ifdef OBIOCGETIF
	case OBIOCGETIF:
#endif
	case BIOCGETIF:
		mutex_enter(d->bd_mtx);
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, addr);
		mutex_exit(d->bd_mtx);
		break;

	/*
	 * Set interface.
	 */
#ifdef OBIOCSETIF
	case OBIOCSETIF:
#endif
	case BIOCSETIF:
		mutex_enter(&bpf_mtx);
		error = bpf_setif(d, addr);
		mutex_exit(&bpf_mtx);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = addr;

			/* Compute number of ticks. */
			if (tv->tv_sec < 0 ||
			    tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
				error = EINVAL;
				break;
			} else if (tv->tv_sec > INT_MAX/hz - 1) {
				d->bd_rtout = INT_MAX;
			} else {
				d->bd_rtout = tv->tv_sec * hz
				    + tv->tv_usec / tick;
			}
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}

#ifdef BIOCGORTIMEOUT
	/*
	 * Get read timeout.
	 */
	case BIOCGORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}
#endif

#ifdef BIOCSORTIMEOUT
	/*
	 * Set read timeout.
	 */
	case BIOCSORTIMEOUT:
		{
			struct timeval50 *tv = addr;

			/* Compute number of ticks. */
			if (tv->tv_sec < 0 ||
			    tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
				error = EINVAL;
				break;
			} else if (tv->tv_sec > INT_MAX/hz - 1) {
				d->bd_rtout = INT_MAX;
			} else {
				d->bd_rtout = tv->tv_sec * hz
				    + tv->tv_usec / tick;
			}
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}
#endif

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}
	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			bs->bs_capt = d->bd_ccount;
			break;
		}

	case BIOCGSTATSOLD:
		{
			struct bpf_stat_old *bs = addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag.
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag.
	 */
	case BIOCSDIRECTION:
		{
			u_int direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	/*
	 * Set "feed packets from bpf back to input" mode.
	 */
	case BIOCSFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	/*
	 * Get "feed packets from bpf back to input" mode.
	 */
	case BIOCGFEEDBACK:
		*(u_int *)addr = d->bd_feedback;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		/*
		 * No need to do anything special as we use FNONBLOCK in
		 * bpf_read() as an indication of whether or not to block
		 * the read.
		 */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		mutex_enter(d->bd_mtx);
		d->bd_async = *(int *)addr;
		mutex_exit(d->bd_mtx);
		break;

	case TIOCSPGRP:		/* Process or group to send signals to */
	case FIOSETOWN:
		error = fsetown(&d->bd_pgid, cmd, addr);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(d->bd_pgid, cmd, addr);
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp. If this file already has a filter,
 * free it and replace it. Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp)
{
	struct bpf_insn *fcode;
	bpfjit_func_t jcode;
	size_t flen, size = 0;
	struct bpf_filter *oldf, *newf;

	jcode = NULL;
	flen = fp->bf_len;

	if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) {
		return EINVAL;
	}

	if (flen) {
		/*
		 * Allocate the buffer, copy the byte-code from
		 * userspace and validate it.
		 */
		size = flen * sizeof(*fp->bf_insns);
		fcode = kmem_alloc(size, KM_SLEEP);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    !bpf_validate(fcode, (int)flen)) {
			kmem_free(fcode, size);
			return EINVAL;
		}
		membar_consumer();
		if (bpf_jit)
			jcode = bpf_jit_generate(NULL, fcode, flen);
	} else {
		fcode = NULL;
	}

	newf = kmem_alloc(sizeof(*newf), KM_SLEEP);
	newf->bf_insn = fcode;
	newf->bf_size = size;
	newf->bf_jitcode = jcode;
	d->bd_jitcode = jcode; /* XXX just for kvm(3) users */

	/* Need to hold bpf_mtx for pserialize_perform */
	mutex_enter(&bpf_mtx);
	mutex_enter(d->bd_mtx);
	oldf = d->bd_filter;
	d->bd_filter = newf;
	membar_producer();
	reset_d(d);
	pserialize_perform(bpf_psz);
	mutex_exit(d->bd_mtx);
	mutex_exit(&bpf_mtx);

	if (oldf != NULL)
		bpf_free_filter(oldf);

	return 0;
}
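
/*
 * Illustrative userland counterpart to bpf_setf() (a sketch only;
 * assumes a descriptor fd opened on /dev/bpf):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),	(accept whole packet)
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * A program whose return value is 0 would instead discard every packet;
 * bpf_validate() above is what rejects malformed programs.
 */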

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	char *cp;
	int unit_seen, i, error;

	KASSERT(mutex_owned(&bpf_mtx));
	/*
	 * Make sure the provided name has a unit number, and default
	 * it to '0' if not specified.
	 * XXX This is ugly ... do this differently?
	 */
	unit_seen = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
	while (*cp++)
		if (*cp >= '0' && *cp <= '9')
			unit_seen = 1;
	if (!unit_seen) {
		/* Make sure to leave room for the '\0'. */
		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
			if ((ifr->ifr_name[i] >= 'a' &&
			     ifr->ifr_name[i] <= 'z') ||
			    (ifr->ifr_name[i] >= 'A' &&
			     ifr->ifr_name[i] <= 'Z'))
				continue;
			ifr->ifr_name[i] = '0';
		}
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	BPF_IFLIST_WRITER_FOREACH(bp) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL ||
		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to the requested interface,
		 * just flush the buffer.
		 */
		/*
		 * bpf_allocbufs is called only here.  bpf_mtx ensures that
		 * no race condition happens on d->bd_sbuf.
		 */
		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		mutex_enter(d->bd_mtx);
		if (bp != d->bd_bif) {
			if (d->bd_bif) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
				BPFIF_DLIST_ENTRY_INIT(d);
			}

			bpf_attachd(d, bp);
		}
		reset_d(d);
		mutex_exit(d->bd_mtx);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Copy the interface name to the ifreq.
 */
static void
bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
{
	memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
}

static int
bpf_stat(struct file *fp, struct stat *st)
{
	struct bpf_d *d = fp->f_bpf;

	(void)memset(st, 0, sizeof(*st));
	mutex_enter(d->bd_mtx);
	st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid);
	st->st_atimespec = d->bd_atime;
	st->st_mtimespec = d->bd_mtime;
	st->st_ctimespec = st->st_birthtimespec = d->bd_btime;
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	st->st_mode = S_IFCHR;
	mutex_exit(d->bd_mtx);
	return 0;
}

/*
 * Support for poll() system call
 *
 * Return true iff the specific operation will not block indefinitely - with
 * the assumption that it is safe to positively acknowledge a request for the
 * ability to write to the BPF device.
 * Otherwise, return false but make a note that a selnotify() must be done.
 */
static int
bpf_poll(struct file *fp, int events)
{
	struct bpf_d *d = fp->f_bpf;
	int revents;

	/*
	 * Refresh the PID associated with this bpf file.
	 */
	mutex_enter(&bpf_mtx);
	d->bd_pid = curproc->p_pid;

	revents = events & (POLLOUT | POLLWRNORM);
	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 */
		mutex_enter(d->bd_mtx);
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		     d->bd_slen != 0)) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curlwp, &d->bd_sel);
			/* Start the read timeout if necessary */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
		mutex_exit(d->bd_mtx);
	}

	mutex_exit(&bpf_mtx);
	return (revents);
}

static void
filt_bpfrdetach(struct knote *kn)
{
	struct bpf_d *d = kn->kn_hook;

	mutex_enter(d->bd_buf_mtx);
	SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(d->bd_buf_mtx);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = kn->kn_hook;
	int rv;

	mutex_enter(d->bd_buf_mtx);
	kn->kn_data = d->bd_hlen;
	if (d->bd_immediate)
		kn->kn_data += d->bd_slen;
	rv = (kn->kn_data > 0);
	mutex_exit(d->bd_buf_mtx);
	return rv;
}

static const struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_bpfrdetach,
	.f_event = filt_bpfread,
};

static int
bpf_kqfilter(struct file *fp, struct knote *kn)
{
	struct bpf_d *d = fp->f_bpf;
	struct klist *klist;

	mutex_enter(d->bd_buf_mtx);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &d->bd_sel.sel_klist;
		kn->kn_fop = &bpfread_filtops;
		break;

	default:
		mutex_exit(d->bd_buf_mtx);
		return (EINVAL);
	}

	kn->kn_hook = d;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(d->bd_buf_mtx);

	return (0);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void *
bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcpy");
		count = uimin(m->m_len, len);
		memcpy(dst, mtod(m, const void *), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
	return dst_arg;
}

/*
 * Dispatch a packet to all the listeners on interface bp.
 *
 * pkt		pointer to the packet, either a data buffer or an mbuf chain
 * buflen	buffer length, if pkt is a data buffer
 * cpfn		a function that can copy pkt into the listener's buffer
 * pktlen	length of the packet
 * direction	BPF_D_IN or BPF_D_OUT
 */
static inline void
bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
    void *pkt, u_int pktlen, u_int buflen, const u_int direction)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = (const uint8_t *)pkt,
		.wirelen = pktlen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bool gottime = false;
	struct timespec ts;
	struct bpf_d *d;
	int s;

	KASSERT(!cpu_intr_p());

	/*
	 * Note that the IPL does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	s = pserialize_read_enter();
	BPFIF_DLIST_READER_FOREACH(d, bp) {
		u_int slen = 0;
		struct bpf_filter *filter;

		if (direction == BPF_D_IN) {
			if (d->bd_direction == BPF_D_OUT)
				continue;
		} else { /* BPF_D_OUT */
			if (d->bd_direction == BPF_D_IN)
				continue;
		}

		atomic_inc_ulong(&d->bd_rcount);
		BPF_STATINC(recv);

		filter = d->bd_filter;
		membar_datadep_consumer();
		if (filter != NULL) {
			if (filter->bf_jitcode != NULL)
				slen = filter->bf_jitcode(NULL, &args);
			else
				slen = bpf_filter_ext(NULL, filter->bf_insn,
				    &args);
		}

		if (!slen) {
			continue;
		}
		if (!gottime) {
			gottime = true;
			nanotime(&ts);
		}
		/* Assume catchpacket doesn't sleep */
		catchpacket(d, pkt, pktlen, slen, cpfn, &ts);
	}
	pserialize_read_exit(s);
}

/*
 * Incoming linkage from device drivers, when the head of the packet is in
 * a buffer, and the tail is in an mbuf chain.
 */
static void
_bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m,
    u_int direction)
{
	u_int pktlen;
	struct mbuf mb;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m) + dlen;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	(void)memset(&mb, 0, sizeof(mb));
	mb.m_type = MT_DATA;
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;

	bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, direction);
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
static void
_bpf_mtap(struct bpf_if *bp, struct mbuf *m, u_int direction)
{
	void *(*cpfn)(void *, const void *, size_t);
	u_int pktlen, buflen;
	void *marg;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m);

	/* Skip zero-sized packets. */
	if (__predict_false(pktlen == 0)) {
		return;
	}

	if (pktlen == m->m_len) {
		cpfn = (void *)memcpy;
		marg = mtod(m, void *);
		buflen = pktlen;
		KASSERT(buflen != 0);
	} else {
		cpfn = bpf_mcpy;
		marg = m;
		buflen = 0;
	}

	bpf_deliver(bp, cpfn, marg, pktlen, buflen, direction);
}

/*
 * We need to prepend the address family as
 * a four byte field.  Cons up a dummy header
 * to pacify bpf.  This is safe because bpf
 * will only read from the mbuf (i.e., it won't
 * try to free it or keep a pointer to it).
 */
static void
_bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m, u_int direction)
{
	struct mbuf m0;

	m0.m_type = MT_DATA;
	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_nextpkt = NULL;
	m0.m_owner = NULL;
	m0.m_len = 4;
	m0.m_data = (char *)&af;

	_bpf_mtap(bp, &m0, direction);
}

/*
 * Put the SLIP pseudo-"link header" in place.
 * Note this M_PREPEND() should never fail,
 * since we know we always have enough space
 * in the input buffer.
 */
static void
_bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m)
{
	u_char *hp;

	M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
	if (*m == NULL)
		return;

	hp = mtod(*m, u_char *);
	hp[SLX_DIR] = SLIPDIR_IN;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	_bpf_mtap(bp, *m, BPF_D_IN);

	m_adj(*m, SLIP_HDRLEN);
}

/*
 * Put the SLIP pseudo-"link header" in
 * place.  The compressed header is now
 * at the beginning of the mbuf.
 */
static void
_bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m)
{
	struct mbuf m0;
	u_char *hp;

	m0.m_type = MT_DATA;
	m0.m_flags = 0;
	m0.m_next = m;
	m0.m_nextpkt = NULL;
	m0.m_owner = NULL;
	m0.m_data = m0.m_dat;
	m0.m_len = SLIP_HDRLEN;

	hp = mtod(&m0, u_char *);

	hp[SLX_DIR] = SLIPDIR_OUT;
	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);

	_bpf_mtap(bp, &m0, BPF_D_OUT);
	m_freem(m);
}

static struct mbuf *
bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m)
{
	struct mbuf *dup;

	dup = m_dup(m, 0, M_COPYALL, M_NOWAIT);
	if (dup == NULL)
		return NULL;

	if (bp->bif_mbuf_tail != NULL) {
		bp->bif_mbuf_tail->m_nextpkt = dup;
	} else {
		bp->bif_mbuf_head = dup;
	}
	bp->bif_mbuf_tail = dup;
#ifdef BPF_MTAP_SOFTINT_DEBUG
	log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n",
	    __func__, dup, bp->bif_ifp->if_xname);
#endif

	return dup;
}

static struct mbuf *
bpf_mbuf_dequeue(struct bpf_if *bp)
{
	struct mbuf *m;
	int s;

	/* XXX NOMPSAFE: assumed running on one CPU */
	s = splnet();
	m = bp->bif_mbuf_head;
	if (m != NULL) {
		bp->bif_mbuf_head = m->m_nextpkt;
		m->m_nextpkt = NULL;

		if (bp->bif_mbuf_head == NULL)
			bp->bif_mbuf_tail = NULL;
#ifdef BPF_MTAP_SOFTINT_DEBUG
		log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n",
		    __func__, m, bp->bif_ifp->if_xname);
#endif
	}
	splx(s);

	return m;
}

static void
bpf_mtap_si(void *arg)
{
	struct bpf_if *bp = arg;
	struct mbuf *m;

	while ((m = bpf_mbuf_dequeue(bp)) != NULL) {
#ifdef BPF_MTAP_SOFTINT_DEBUG
		log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n",
		    __func__, m, bp->bif_ifp->if_xname);
#endif
		bpf_ops->bpf_mtap(bp, m, BPF_D_IN);
		m_freem(m);
	}
}

static void
_bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m)
{
	struct bpf_if *bp = ifp->if_bpf;
	struct mbuf *dup;

	KASSERT(cpu_intr_p());

	/* To avoid extra invocations of the softint */
	if (BPFIF_DLIST_READER_EMPTY(bp))
		return;
	KASSERT(bp->bif_si != NULL);

	dup = bpf_mbuf_enqueue(bp, m);
	if (dup != NULL)
		softint_schedule(bp->bif_si);
}

static int
bpf_hdrlen(struct bpf_d *d)
{
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
#ifdef _LP64
	if (d->bd_compat32)
		return (BPF_WORDALIGN32(hdrlen + SIZEOF_BPF_HDR32) - hdrlen);
	else
#endif
		return (BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen);
}
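
/*
 * For example, with DLT_EN10MB the 14-byte link header plus the padded
 * BPF header ends on a longword boundary, so the network layer header
 * that follows it in each captured record starts word-aligned.
 */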

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Call the wakeup functions if it's time to wakeup
 * a listener (buffer full), "cpfn" is the routine called to do the
 * actual data transfer.  memcpy is passed in to copy contiguous chunks,
 * while bpf_mcpy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void *(*cpfn)(void *, const void *, size_t), struct timespec *ts)
{
	char *h;
	int totlen, curlen, caplen;
	int hdrlen = bpf_hdrlen(d);
	int do_wakeup = 0;

	atomic_inc_ulong(&d->bd_ccount);
	BPF_STATINC(capt);
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + uimin(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;
	/*
	 * If we adjusted totlen to fit the bufsize, it could be that
	 * totlen is smaller than hdrlen because of the link layer header.
	 */
	caplen = totlen - hdrlen;
	if (caplen < 0)
		caplen = 0;

	mutex_enter(d->bd_buf_mtx);
	/*
	 * Round up the end of the previous packet to the next longword.
	 */
#ifdef _LP64
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			mutex_exit(d->bd_buf_mtx);
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			atomic_inc_ulong(&d->bd_dcount);
			BPF_STATINC(drop);
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	h = (char *)d->bd_sbuf + curlen;
#ifdef _LP64
	if (d->bd_compat32) {
		struct bpf_hdr32 *hp32;

		hp32 = (struct bpf_hdr32 *)h;
		hp32->bh_tstamp.tv_sec = ts->tv_sec;
		hp32->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
		hp32->bh_datalen = pktlen;
		hp32->bh_hdrlen = hdrlen;
		hp32->bh_caplen = caplen;
	} else
#endif
	{
		struct bpf_hdr *hp;

		hp = (struct bpf_hdr *)h;
		hp->bh_tstamp.tv_sec = ts->tv_sec;
		hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
		hp->bh_datalen = pktlen;
		hp->bh_hdrlen = hdrlen;
		hp->bh_caplen = caplen;
	}

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(h + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;
	mutex_exit(d->bd_buf_mtx);

	/*
	 * Call bpf_wakeup after bd_slen has been updated so that kevent(2)
	 * will cause filt_bpfread() to be called with it adjusted.
	 */
	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{

	d->bd_fbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP);
	if (!d->bd_fbuf)
		return (ENOBUFS);
	d->bd_sbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP);
	if (!d->bd_sbuf) {
		kmem_free(d->bd_fbuf, d->bd_bufsize);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

static void
bpf_free_filter(struct bpf_filter *filter)
{

	KASSERT(filter != NULL);
	KASSERT(filter->bf_insn != NULL);

	kmem_free(filter->bf_insn, filter->bf_size);
	if (filter->bf_jitcode != NULL)
		bpf_jit_freecode(filter->bf_jitcode);
	kmem_free(filter, sizeof(*filter));
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		kmem_free(d->bd_sbuf, d->bd_bufsize);
		if (d->bd_hbuf != NULL)
			kmem_free(d->bd_hbuf, d->bd_bufsize);
		if (d->bd_fbuf != NULL)
			kmem_free(d->bd_fbuf, d->bd_bufsize);
	}
	if (d->bd_filter != NULL) {
		bpf_free_filter(d->bd_filter);
		d->bd_filter = NULL;
	}
	d->bd_jitcode = NULL;
}

/*
 * Attach an interface to bpf.  dlt is the link layer type;
 * hdrlen is the fixed size of the link header for the specified dlt
 * (variable length headers not yet supported).
 */
static void
_bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP);
	if (bp == NULL)
		panic("bpfattach");

	mutex_enter(&bpf_mtx);
	bp->bif_driverp = driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	bp->bif_si = NULL;
	BPF_IFLIST_ENTRY_INIT(bp);
	PSLIST_INIT(&bp->bif_dlist_head);
	psref_target_init(&bp->bif_psref, bpf_psref_class);

	BPF_IFLIST_WRITER_INSERT_HEAD(bp);

	*bp->bif_driverp = NULL;

	bp->bif_hdrlen = hdrlen;
	mutex_exit(&bpf_mtx);
#if 0
	printf("bpf: %s attached\n", ifp->if_xname);
#endif
}

static void
_bpf_mtap_softint_init(struct ifnet *ifp)
{
	struct bpf_if *bp;

	mutex_enter(&bpf_mtx);
	BPF_IFLIST_WRITER_FOREACH(bp) {
		if (bp->bif_ifp != ifp)
			continue;

		bp->bif_mbuf_head = NULL;
		bp->bif_mbuf_tail = NULL;
		bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp);
		if (bp->bif_si == NULL)
			panic("%s: softint_establish() failed", __func__);
		break;
	}
	mutex_exit(&bpf_mtx);

	if (bp == NULL)
		panic("%s: no bpf_if found for %s", __func__, ifp->if_xname);
}

/*
 * Remove an interface from bpf.
 */
static void
_bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp;
	struct bpf_d *d;
	int s;

	mutex_enter(&bpf_mtx);
	/* Nuke the vnodes for any open instances */
again_d:
	BPF_DLIST_WRITER_FOREACH(d) {
		mutex_enter(d->bd_mtx);
		if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
			/*
			 * Detach the descriptor from the interface now.
			 * It will be freed later by the close routine.
			 */
			d->bd_promisc = 0;	/* we can't touch device. */
			bpf_detachd(d);
			mutex_exit(d->bd_mtx);
			goto again_d;
		}
		mutex_exit(d->bd_mtx);
	}

again:
	BPF_IFLIST_WRITER_FOREACH(bp) {
		if (bp->bif_ifp == ifp) {
			BPF_IFLIST_WRITER_REMOVE(bp);

			pserialize_perform(bpf_psz);
			psref_target_destroy(&bp->bif_psref, bpf_psref_class);

			BPF_IFLIST_ENTRY_DESTROY(bp);
			if (bp->bif_si != NULL) {
				/* XXX NOMPSAFE: assumed running on one CPU */
				s = splnet();
				while (bp->bif_mbuf_head != NULL) {
					struct mbuf *m = bp->bif_mbuf_head;
					bp->bif_mbuf_head = m->m_nextpkt;
					m_freem(m);
				}
				splx(s);
				softint_disestablish(bp->bif_si);
			}
			kmem_free(bp, sizeof(*bp));
			goto again;
		}
	}
	mutex_exit(&bpf_mtx);
}

/*
 * Change the data link type of an interface.
 */
static void
_bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	struct bpf_if *bp;

	mutex_enter(&bpf_mtx);
	BPF_IFLIST_WRITER_FOREACH(bp) {
		if (bp->bif_driverp == &ifp->if_bpf)
			break;
	}
	if (bp == NULL)
		panic("bpf_change_type");

	bp->bif_dlt = dlt;

	bp->bif_hdrlen = hdrlen;
	mutex_exit(&bpf_mtx);
}

/*
 * Get a list of the available data link types of the interface.
 */
2202 static int
2203 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2204 {
2205 int n, error;
2206 struct ifnet *ifp;
2207 struct bpf_if *bp;
2208 int s, bound;
2209
2210 KASSERT(mutex_owned(d->bd_mtx));
2211
2212 ifp = d->bd_bif->bif_ifp;
2213 n = 0;
2214 error = 0;
2215
2216 bound = curlwp_bind();
2217 s = pserialize_read_enter();
2218 BPF_IFLIST_READER_FOREACH(bp) {
2219 if (bp->bif_ifp != ifp)
2220 continue;
2221 if (bfl->bfl_list != NULL) {
2222 struct psref psref;
2223
2224 			if (n >= bfl->bfl_len) {
2225 				pserialize_read_exit(s);
				curlwp_bindx(bound);	/* undo curlwp_bind() above */
2226 				return ENOMEM;
2227 			}
2228
2229 bpf_if_acquire(bp, &psref);
2230 pserialize_read_exit(s);
2231
2232 error = copyout(&bp->bif_dlt,
2233 bfl->bfl_list + n, sizeof(u_int));
2234
2235 s = pserialize_read_enter();
2236 bpf_if_release(bp, &psref);
2237 }
2238 n++;
2239 }
2240 pserialize_read_exit(s);
2241 curlwp_bindx(bound);
2242
2243 bfl->bfl_len = n;
2244 return error;
2245 }
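
/*
 * Illustrative userland usage (a sketch, not from this file): the
 * two-call pattern served by the function above, via BIOCGDLTLIST.
 * The first call passes bfl_list == NULL so only bfl_len is filled
 * in; the second call fetches the array:
 *
 *	struct bpf_dltlist bfl;
 *
 *	memset(&bfl, 0, sizeof(bfl));
 *	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1)
 *		err(1, "BIOCGDLTLIST");
 *	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
 *	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1)
 *		err(1, "BIOCGDLTLIST");
 *
 * ENOMEM from the second call means new dlts were attached between
 * the two calls and bfl_len is now too small.
 */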
2246
2247 /*
2248 * Set the data link type of a BPF instance.
2249 */
2250 static int
2251 bpf_setdlt(struct bpf_d *d, u_int dlt)
2252 {
2253 int error, opromisc;
2254 struct ifnet *ifp;
2255 struct bpf_if *bp;
2256
2257 KASSERT(mutex_owned(&bpf_mtx));
2258 KASSERT(mutex_owned(d->bd_mtx));
2259
2260 if (d->bd_bif->bif_dlt == dlt)
2261 return 0;
2262 ifp = d->bd_bif->bif_ifp;
2263 BPF_IFLIST_WRITER_FOREACH(bp) {
2264 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2265 break;
2266 }
2267 if (bp == NULL)
2268 return EINVAL;
2269 opromisc = d->bd_promisc;
2270 bpf_detachd(d);
2271 BPFIF_DLIST_ENTRY_INIT(d);
2272 bpf_attachd(d, bp);
2273 reset_d(d);
2274 if (opromisc) {
2275 KERNEL_LOCK_UNLESS_NET_MPSAFE();
2276 error = ifpromisc(bp->bif_ifp, 1);
2277 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
2278 if (error)
2279 printf("%s: bpf_setdlt: ifpromisc failed (%d)\n",
2280 bp->bif_ifp->if_xname, error);
2281 else
2282 d->bd_promisc = 1;
2283 }
2284 return 0;
2285 }
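
/*
 * Illustrative userland counterpart (a sketch): selecting one of the
 * dlts reported by BIOCGDLTLIST:
 *
 *	u_int dlt = DLT_IEEE802_11_RADIO;
 *
 *	if (ioctl(fd, BIOCSDLT, &dlt) == -1)
 *		err(1, "BIOCSDLT");
 *
 * EINVAL means no bpf_if with that dlt exists for the descriptor's
 * interface.  On success the descriptor is reset, so any packets
 * still buffered under the old dlt are discarded.
 */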
2286
2287 static int
2288 sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS)
2289 {
2290 int newsize, error;
2291 struct sysctlnode node;
2292
2293 node = *rnode;
2294 node.sysctl_data = &newsize;
2295 newsize = bpf_maxbufsize;
2296 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2297 if (error || newp == NULL)
2298 return (error);
2299
2300 if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE)
2301 return (EINVAL);
2302
2303 bpf_maxbufsize = newsize;
2304
2305 return (0);
2306 }
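
/*
 * Usage note: this handler backs the writable net.bpf.maxbufsize
 * sysctl, which caps the per-descriptor buffer size requested with
 * BIOCSBLEN (oversized requests are clamped to this limit), e.g.
 *
 *	sysctl -w net.bpf.maxbufsize=1048576
 *
 * Out-of-range values for the sysctl itself are rejected with EINVAL
 * rather than clamped.
 */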
2307
2308 #if defined(MODULAR) || defined(BPFJIT)
2309 static int
2310 sysctl_net_bpf_jit(SYSCTLFN_ARGS)
2311 {
2312 bool newval;
2313 int error;
2314 struct sysctlnode node;
2315
2316 node = *rnode;
2317 node.sysctl_data = &newval;
2318 newval = bpf_jit;
2319 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2320 if (error != 0 || newp == NULL)
2321 return error;
2322
2323 bpf_jit = newval;
2324
2325 /*
2326 * Do a full sync to publish new bpf_jit value and
2327 * update bpfjit_module_ops.bj_generate_code variable.
2328 */
2329 membar_sync();
2330
2331 if (newval && bpfjit_module_ops.bj_generate_code == NULL) {
2332 		printf("JIT compilation is postponed "
2333 		    "until the bpfjit module is loaded\n");
2334 }
2335
2336 return 0;
2337 }
2338 #endif
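
/*
 * Usage note: "sysctl -w net.bpf.jit=1" affects filters installed
 * after the toggle; a program already attached keeps whatever code
 * was generated when it was set.  If the bpfjit module is not yet
 * loaded, the message above tells the operator that compilation
 * starts only once it is (e.g. after "modload bpfjit").
 */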
2339
2340 static int
2341 sysctl_net_bpf_peers(SYSCTLFN_ARGS)
2342 {
2343 int error, elem_count;
2344 struct bpf_d *dp;
2345 struct bpf_d_ext dpe;
2346 size_t len, needed, elem_size, out_size;
2347 char *sp;
2348
2349 if (namelen == 1 && name[0] == CTL_QUERY)
2350 return (sysctl_query(SYSCTLFN_CALL(rnode)));
2351
2352 if (namelen != 2)
2353 return (EINVAL);
2354
2355 /* BPF peers is privileged information. */
2356 error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE,
2357 KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL);
2358 if (error)
2359 return (EPERM);
2360
2361 len = (oldp != NULL) ? *oldlenp : 0;
2362 sp = oldp;
2363 elem_size = name[0];
2364 elem_count = name[1];
2365 out_size = MIN(sizeof(dpe), elem_size);
2366 needed = 0;
2367
2368 if (elem_size < 1 || elem_count < 0)
2369 return (EINVAL);
2370
2371 mutex_enter(&bpf_mtx);
2372 BPF_DLIST_WRITER_FOREACH(dp) {
2373 if (len >= elem_size && elem_count > 0) {
2374 #define BPF_EXT(field) dpe.bde_ ## field = dp->bd_ ## field
2375 BPF_EXT(bufsize);
2376 BPF_EXT(promisc);
2377 BPF_EXT(state);
2378 BPF_EXT(immediate);
2379 BPF_EXT(hdrcmplt);
2380 BPF_EXT(direction);
2381 BPF_EXT(pid);
2382 BPF_EXT(rcount);
2383 BPF_EXT(dcount);
2384 BPF_EXT(ccount);
2385 #undef BPF_EXT
2386 mutex_enter(dp->bd_mtx);
2387 if (dp->bd_bif)
2388 (void)strlcpy(dpe.bde_ifname,
2389 dp->bd_bif->bif_ifp->if_xname,
2390 IFNAMSIZ - 1);
2391 else
2392 dpe.bde_ifname[0] = '\0';
2393 mutex_exit(dp->bd_mtx);
2394
2395 error = copyout(&dpe, sp, out_size);
2396 if (error)
2397 break;
2398 sp += elem_size;
2399 len -= elem_size;
2400 }
2401 needed += elem_size;
2402 if (elem_count > 0 && elem_count != INT_MAX)
2403 elem_count--;
2404 }
2405 mutex_exit(&bpf_mtx);
2406
2407 *oldlenp = needed;
2408
2409 return (error);
2410 }
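
/*
 * Illustrative userland query (a sketch): the caller appends
 * elem_size and elem_count to the MIB name (they arrive here as
 * name[0] and name[1]), sizes the result with a NULL buffer first,
 * then fetches:
 *
 *	int name[CTL_MAXNAME];
 *	u_int namelen;
 *	size_t len;
 *
 *	... resolve "net.bpf.peers" into name/namelen with
 *	    sysctlgetmibinfo(3) ...
 *	name[namelen++] = sizeof(struct bpf_d_ext);
 *	name[namelen++] = INT_MAX;		(no element limit)
 *	sysctl(name, namelen, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctl(name, namelen, buf, &len, NULL, 0);
 *
 * Passing sizeof(struct bpf_d_ext) keeps old binaries working if the
 * structure grows, since out_size above is the MIN() of the two.
 */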
2411
2412 static void
2413 bpf_stats(void *p, void *arg, struct cpu_info *ci __unused)
2414 {
2415 struct bpf_stat *const stats = p;
2416 struct bpf_stat *sum = arg;
2417
2418 sum->bs_recv += stats->bs_recv;
2419 sum->bs_drop += stats->bs_drop;
2420 sum->bs_capt += stats->bs_capt;
2421 }
2422
2423 static int
2424 bpf_sysctl_gstats_handler(SYSCTLFN_ARGS)
2425 {
2426 struct sysctlnode node;
2427 int error;
2428 struct bpf_stat sum;
2429
2430 memset(&sum, 0, sizeof(sum));
2431 node = *rnode;
2432
2433 percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum);
2434
2435 	node.sysctl_data = &sum;
2436 node.sysctl_size = sizeof(sum);
2437 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2438 if (error != 0 || newp == NULL)
2439 return error;
2440
2441 return 0;
2442 }
2443
2444 static struct sysctllog *bpf_sysctllog;
2445 static void
2446 sysctl_net_bpf_setup(void)
2447 {
2448 const struct sysctlnode *node;
2449
2450 node = NULL;
2451 sysctl_createv(&bpf_sysctllog, 0, NULL, &node,
2452 CTLFLAG_PERMANENT,
2453 CTLTYPE_NODE, "bpf",
2454 SYSCTL_DESCR("BPF options"),
2455 NULL, 0, NULL, 0,
2456 CTL_NET, CTL_CREATE, CTL_EOL);
2457 if (node != NULL) {
2458 #if defined(MODULAR) || defined(BPFJIT)
2459 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2460 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2461 CTLTYPE_BOOL, "jit",
2462 SYSCTL_DESCR("Toggle Just-In-Time compilation"),
2463 sysctl_net_bpf_jit, 0, &bpf_jit, 0,
2464 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2465 #endif
2466 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2467 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2468 CTLTYPE_INT, "maxbufsize",
2469 SYSCTL_DESCR("Maximum size for data capture buffer"),
2470 sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0,
2471 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2472 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2473 CTLFLAG_PERMANENT,
2474 CTLTYPE_STRUCT, "stats",
2475 SYSCTL_DESCR("BPF stats"),
2476 bpf_sysctl_gstats_handler, 0, NULL, 0,
2477 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2478 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2479 CTLFLAG_PERMANENT,
2480 CTLTYPE_STRUCT, "peers",
2481 SYSCTL_DESCR("BPF peers"),
2482 sysctl_net_bpf_peers, 0, NULL, 0,
2483 CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2484 }
2485
2486 }
2487
2488 struct bpf_ops bpf_ops_kernel = {
2489 .bpf_attach = _bpfattach,
2490 .bpf_detach = _bpfdetach,
2491 .bpf_change_type = _bpf_change_type,
2492
2493 .bpf_mtap = _bpf_mtap,
2494 .bpf_mtap2 = _bpf_mtap2,
2495 .bpf_mtap_af = _bpf_mtap_af,
2496 .bpf_mtap_sl_in = _bpf_mtap_sl_in,
2497 .bpf_mtap_sl_out = _bpf_mtap_sl_out,
2498
2499 .bpf_mtap_softint = _bpf_mtap_softint,
2500 .bpf_mtap_softint_init = _bpf_mtap_softint_init,
2501 };
2502
2503 MODULE(MODULE_CLASS_DRIVER, bpf, "bpf_filter");
2504
2505 static int
2506 bpf_modcmd(modcmd_t cmd, void *arg)
2507 {
2508 #ifdef _MODULE
2509 devmajor_t bmajor, cmajor;
2510 #endif
2511 int error = 0;
2512
2513 switch (cmd) {
2514 case MODULE_CMD_INIT:
2515 bpf_init();
2516 #ifdef _MODULE
2517 bmajor = cmajor = NODEVMAJOR;
2518 error = devsw_attach("bpf", NULL, &bmajor,
2519 &bpf_cdevsw, &cmajor);
2520 if (error)
2521 break;
2522 #endif
2523
2524 bpf_ops_handover_enter(&bpf_ops_kernel);
2525 atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
2526 bpf_ops_handover_exit();
2527 sysctl_net_bpf_setup();
2528 break;
2529
2530 case MODULE_CMD_FINI:
2531 /*
2532 * While there is no reference counting for bpf callers,
2533 * unload could at least in theory be done similarly to
2534 * system call disestablishment. This should even be
2535 * a little simpler:
2536 *
2537 * 1) replace op vector with stubs
2538 * 2) post update to all cpus with xc
2539 * 3) check that nobody is in bpf anymore
2540 * (it's doubtful we'd want something like l_sysent,
2541 * but we could do something like *signed* percpu
2542 * counters. if the sum is 0, we're good).
2543 * 4) if fail, unroll changes
2544 *
2545 		 * NOTE: the change won't be atomic to the outside; some
2546 		 * packets may not be captured even if the unload is
2547 		 * not successful.  I think packet capture not working
2548 		 * is a perfectly logical consequence of trying to
2549 		 * disable packet capture.
2550 */
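		/*
		 * A minimal sketch of step 3 (hypothetical names): give
		 * every bpf_ops entry point a matched increment/decrement
		 * of a percpu int64_t counter, then fold it here:
		 *
		 *	static void
		 *	bpf_inuse_fold(void *p, void *arg, struct cpu_info *ci)
		 *	{
		 *		*(int64_t *)arg += *(int64_t *)p;
		 *	}
		 *
		 *	int64_t sum = 0;
		 *	percpu_foreach(bpf_inuse, bpf_inuse_fold, &sum);
		 *	if (sum != 0)
		 *		... still in use: unroll to bpf_ops_kernel ...
		 *
		 * The counters must be signed because an LWP can enter
		 * bpf on one CPU and leave on another; a per-CPU count
		 * may go negative even though the total is zero.
		 */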
2551 error = EOPNOTSUPP;
2552 /* insert sysctl teardown */
2553 break;
2554
2555 default:
2556 error = ENOTTY;
2557 break;
2558 }
2559
2560 return error;
2561 }
2562