/*	$NetBSD: bpf.c,v 1.37 1997/10/09 18:58:08 christos Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 * static char rcsid[] =
 * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/map.h>
#include <sys/conf.h>

#include <sys/file.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/poll.h>

#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <net/if_arc.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;
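
/*
 * Besides patching the kernel default above, a user process can ask for a
 * different buffer size on its own descriptor with BIOCSBLEN, but only
 * before it binds an interface with BIOCSETIF (see bpfioctl() below).  A
 * minimal user-level sketch; the device path and the omitted error
 * handling are assumptions of the example, not of this driver:
 *
 *	int fd = open("/dev/bpf0", O_RDWR);
 *	u_int blen = 32768;
 *	ioctl(fd, BIOCSBLEN, &blen);	// clamped to BPF_MIN/MAXBUFSIZE
 *	// blen now holds the size granted; bpfread() requires read()
 *	// to be issued with a buffer of exactly this size.
 */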

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
struct bpf_if	*bpf_iflist;
struct bpf_d	bpf_dtab[NBPFILTER];

#if BSD >= 199207 || NetBSD0_9 >= 2
/*
 * bpfilterattach() is called at boot time in new systems.  We do
 * nothing here since old systems will not call this.
 */
/* ARGSUSED */
void
bpfilterattach(n)
	int n;
{
}
#endif

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int, int,
			        struct mbuf **, struct sockaddr *));
static void	bpf_attachd __P((struct bpf_d *, struct bpf_if *));
static void	bpf_detachd __P((struct bpf_d *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
int		bpfpoll __P((dev_t, int, struct proc *));
static __inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int, u_int,
				 void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));

static int
bpf_movein(uio, linktype, mtu, mp, sockp)
	register struct uio *uio;
	int linktype;
	int mtu;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int align;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		align = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		/* 6(dst)+6(src)+2(type) */
		hlen = sizeof(struct ether_header);
		align = 2;
		break;

	case DLT_ARCNET:
		sockp->sa_family = AF_UNSPEC;
		hlen = ARC_HDRLEN;
		align = 5;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		align = 0;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		align = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	/*
	 * If there aren't enough bytes for a link level header or the
	 * packet length exceeds the interface mtu, return an error.
	 */
	if (len < hlen || len - hlen > mtu)
		return (EMSGSIZE);

	/*
	 * XXX Avoid complicated buffer chaining ---
	 * bail if it won't fit in a single mbuf.
	 * (Take into account possible alignment bytes)
	 */
	if ((unsigned)len > MCLBYTES - align)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	m->m_pkthdr.rcvif = 0;
	m->m_pkthdr.len = len - hlen;
	if (len > MHLEN - align) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}

	/* Ensure the data is properly aligned */
	if (align > 0) {
#if BSD >= 199103
		m->m_data += align;
#else
		m->m_off += align;
#endif
		m->m_len -= align;
	}

	error = UIOMOVE(mtod(m, caddr_t), len, UIO_WRITE, uio);
	if (error)
		goto bad;
	if (hlen != 0) {
		bcopy(mtod(m, caddr_t), sockp->sa_data, hlen);
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		len -= hlen;
	}
	m->m_len = len;
	*mp = m;
	return (0);
 bad:
	m_freem(m);
	return (error);
}
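
/*
 * Worked example of bpf_movein() for DLT_EN10MB, assuming a 14-byte
 * struct ether_header (the sizes are illustrative): a write() of a
 * 60-byte frame gives hlen = 14 and align = 2.  The whole frame is copied
 * into the mbuf at a 2-byte offset so that the payload ends up longword
 * aligned, then the 14 header bytes are copied into sockp->sa_data and
 * stripped from the mbuf.  bpfwrite() therefore hands if_output() a
 * 46-byte mbuf plus a sockaddr carrying the link-level header.
 */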

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error;

		d->bd_promisc = 0;
		/*
		 * Take device out of promiscuous mode.  Since we were
		 * able to enter promiscuous mode, we should be able
		 * to turn it off.  But we can get an error if
		 * the interface was configured down, so only panic
		 * if we get an unexpected error.
		 */
		error = ifpromisc(bp->bif_ifp, 0);
		if (error && error != EINVAL)
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open the bpf device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "in use" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
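
/*
 * For example (values illustrative): if store buffer A holds 100 bytes of
 * packets (bd_sbuf = A, bd_slen = 100), the hold slot is empty and the
 * free slot holds buffer B, then after ROTATE_BUFFERS(d) readers see
 * bd_hbuf = A with bd_hlen = 100, new packets accumulate in bd_sbuf = B
 * from bd_slen = 0, and bd_fbuf stays empty until bpfread() finishes with
 * A and returns it to the free slot.
 */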
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate) {
			if (d->bd_slen == 0) {
				splx(s);
				return (EWOULDBLOCK);
			}
			/*
			 * One or more packets arrived since the previous
			 * read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_rtout != -1)
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		else
			error = EWOULDBLOCK; /* User requested non-blocking I/O */
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (error != 0)
			goto done;
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
done:
	splx(s);
	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	struct proc *p;

	wakeup((caddr_t)d);
	if (d->bd_async)
		if (d->bd_pgid > 0)
			gsignal (d->bd_pgid, SIGIO);
		else if ((p = pfind (-d->bd_pgid)) != NULL)
			psignal (p, SIGIO);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m, &dst);
	if (error)
		return (error);

	if (m->m_pkthdr.len > ifp->if_mtu) {
		m_freem(m);
		return (EMSGSIZE);
	}

	s = splsoftnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

#ifdef BPF_KERN_FILTER
extern struct bpf_insn *bpf_tcp_filter;
extern struct bpf_insn *bpf_udp_filter;
#endif

/*
 *  FIONREAD		Check for read packet available.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set link layer read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
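/*
 * A typical user-level setup sequence for the commands above (a sketch
 * only; the device path, interface name, headers and omitted error
 * checking are assumptions of the example, not requirements of this
 * driver):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/time.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *
 *	int fd = open("/dev/bpf0", O_RDWR);
 *	struct ifreq ifr;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		// bind to an interface
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);		// read() must use this size
 *	struct timeval tv = { 1, 0 };
 *	ioctl(fd, BIOCSRTIMEOUT, &tv);		// or BIOCIMMEDIATE
 */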
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;
#ifdef BPF_KERN_FILTER
	register struct bpf_insn **p;
#endif

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

#ifdef BPF_KERN_FILTER
	/*
	 * Set TCP or UDP reject filter.
	 */
	case BIOCSTCPF:
	case BIOCSUDPF:
		if (!suser()) {
			error = EPERM;
			break;
		}

		/* Validate and store filter */
		error = bpf_setf(d, (struct bpf_program *)addr);

		/* Free possible old filter */
		if (cmd == BIOCSTCPF)
			p = &bpf_tcp_filter;
		else
			p = &bpf_udp_filter;
		if (*p != NULL)
			free((caddr_t)*p, M_DEVBUF);

		/* Steal new filter (noop if error) */
		s = splimp();
		*p = d->bd_filter;
		d->bd_filter = NULL;
		splx(s);
		break;
#endif

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Compute number of ticks. */
			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
				d->bd_rtout = 1;
			break;
		}
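	/*
	 * Example of the conversion above, assuming hz = 100 and therefore
	 * tick = 10000 microseconds per clock tick (the values are
	 * illustrative): a timeout of { tv_sec = 2, tv_usec = 500000 }
	 * becomes 2 * 100 + 500000 / 10000 = 250 ticks, and a nonzero
	 * timeout shorter than one tick is rounded up to 1 so it is not
	 * treated as "no timeout" by the sleep in bpfread().
	 */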

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case FIONBIO:		/* Non-blocking I/O */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	/*
	 * N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing
	 * the equivalent of a TIOCSPGRP and hence end up here.  *However*
	 * TIOCSPGRP's arg is a process group if it's positive and a process
	 * id if it's negative.  This is exactly the opposite of what the
	 * other two functions want!  Therefore there is code in ioctl and
	 * fcntl to negate the arg before calling here.
	 */
	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (fcode == 0)
		return (ENOMEM);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
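
/*
 * Example of a filter program that bpf_setf() above accepts (a user-level
 * sketch using the BPF_STMT/BPF_JUMP macros from <net/bpf.h>; "fd" and the
 * assumption of an Ethernet-style link with the type field at offset 12
 * are part of the example, not of this file).  It accepts IP packets in
 * full and rejects everything else:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),	// accept whole packet
 *		BPF_STMT(BPF_RET+BPF_K, 0),		// reject
 *	};
 *	struct bpf_program prog = { sizeof(insns) / sizeof(insns[0]), insns };
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * bpf_validate() rejects malformed programs, for example ones whose last
 * instruction is not a BPF_RET or that are longer than BPF_MAXINSNS.
 */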

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit_seen, i, s, error;

	/*
	 * Make sure the provided name has a unit number, and default
	 * it to '0' if not specified.
	 * XXX This is ugly ... do this differently?
	 */
	unit_seen = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
	while (*cp++)
		if (*cp >= '0' && *cp <= '9')
			unit_seen = 1;
	if (!unit_seen) {
		/* Make sure to leave room for the '\0'. */
		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
			if ((ifr->ifr_name[i] >= 'a' &&
			     ifr->ifr_name[i] <= 'z') ||
			    (ifr->ifr_name[i] >= 'A' &&
			     ifr->ifr_name[i] <= 'Z'))
				continue;
			/* Append the default unit number and stop. */
			ifr->ifr_name[i] = '0';
			break;
		}
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 ||
		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Copy the interface name to the ifreq.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{

	bcopy(ifp->if_xname, ifr->ifr_name, IFNAMSIZ);
}

/*
 * Support for poll() system call
 *
 * Return the events that are ready (will not block).  Otherwise, record
 * the request with selrecord() so that a selwakeup() will be issued when
 * data arrives.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int revents = 0;
	register int s = splimp();

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	if (events & (POLLIN | POLLRDNORM))
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);

	splx(s);
	return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up pending readers if the buffer fills or
 * immediate mode is set.  "cpfn" is the routine called to do the
 * actual data transfer.  bcopy is passed in to copy contiguous chunks,
 * while bpf_mcopy is passed in to copy mbuf chains.  In the latter
 * case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
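
/*
 * The layout built above is what a user-level reader must parse: each
 * record in the buffer returned by read() is a struct bpf_hdr followed by
 * bh_caplen bytes of packet data, and the next record starts at the next
 * BPF_WORDALIGN boundary.  A sketch of the consumer loop ("buf" and "n"
 * come from read() and are assumptions of the example):
 *
 *	char *p = buf;
 *	while (p < buf + n) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		u_char *pkt = (u_char *)p + bh->bh_hdrlen;
 *		// bh->bh_caplen bytes captured out of bh->bh_datalen
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */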

/*
 * Allocate the packet buffers for a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and has not yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
#if BSD < 199103
	static struct bpf_if bpf_ifs[NBPFILTER];
	static int bpfifno;

	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
#else
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
#endif
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
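
	/*
	 * Worked example of the computation above, assuming a 32-bit
	 * machine where SIZEOF_BPF_HDR is 18 (the sizes are illustrative):
	 * for an Ethernet attachment hdrlen is 14, BPF_WORDALIGN(14 + 18)
	 * is 32, so bif_hdrlen becomes 32 - 14 = 18.  catchpacket() then
	 * places the header at a longword-aligned offset and the captured
	 * network-layer header starts at offset 18 + 14 = 32 within the
	 * record, i.e. again on a longword boundary.
	 */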

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

#if 0
	printf("bpf: %s attached\n", ifp->if_xname);
#endif
}
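
/*
 * How a network driver is expected to use the hooks in this file (a
 * sketch; the if_bpf field name and the DLT chosen are the usual
 * convention for Ethernet drivers but are assumptions as far as this
 * file is concerned):
 *
 *	// in the driver's attach routine
 *	#if NBPFILTER > 0
 *	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
 *	#endif
 *
 *	// in the receive path, before handing the packet to the stack
 *	#if NBPFILTER > 0
 *	if (ifp->if_bpf)
 *		bpf_mtap(ifp->if_bpf, m);
 *	#endif
 *
 * The cookie passed to bpfattach() is the "driver's bpf cookie" set by
 * bpf_attachd(): it stays NULL until some descriptor listens on the
 * interface, so the per-packet cost while bpf is idle is one pointer test.
 */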

#if BSD >= 199103
/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	register struct ifnet *ifp;
	register int pswitch;
{
	register int pcount, ret;
	register short flags;
	struct ifreq ifr;

	pcount = ifp->if_pcount;
	flags = ifp->if_flags;
	if (pswitch) {
		/*
		 * If the device is not configured up, we cannot put it in
		 * promiscuous mode.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
		/*
		 * If the device is not configured up, we should not need to
		 * turn off promiscuous mode (device should have turned it
		 * off when interface went down; and will look at IFF_PROMISC
		 * again next time interface comes up).
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (0);
	}
	bzero((caddr_t)&ifr, sizeof(ifr));
	ifr.ifr_flags = ifp->if_flags;
	ret = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
	/* Restore interface state if not successful */
	if (ret != 0) {
		ifp->if_pcount = pcount;
		ifp->if_flags = flags;
	}
	return (ret);
}
#endif

#if BSD < 199103
/*
 * Allocate some memory for bpf.  This is temporary SunOS support, and
 * is admittedly a hack.
 * If resources unavailable, return 0.
 */
static caddr_t
bpf_alloc(size, canwait)
	register int size;
	register int canwait;
{
	register struct mbuf *m;

	if ((unsigned)size > (MCLBYTES-8))
		return 0;

	MGET(m, canwait, MT_DATA);
	if (m == 0)
		return 0;
	if ((unsigned)size > (MLEN-8)) {
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
			m_freem(m);
			return 0;
		}
	}
	*mtod(m, struct mbuf **) = m;
	return mtod(m, caddr_t) + 8;
}
#endif