bpf.c revision 1.12

1 /*
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)bpf.c 8.2 (Berkeley) 3/28/94
39 * $Id: bpf.c,v 1.12 1994/05/13 06:02:14 mycroft Exp $
40 */
41
42 #include "bpfilter.h"
43
44 #ifndef __GNUC__
45 #define inline
46 #else
47 #define inline __inline
48 #endif
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/buf.h>
54 #include <sys/time.h>
55 #include <sys/proc.h>
56 #include <sys/user.h>
57 #include <sys/ioctl.h>
58 #include <sys/map.h>
59
60 #include <sys/file.h>
61 #if defined(sparc) && BSD < 199103
62 #include <sys/stream.h>
63 #endif
64 #include <sys/tty.h>
65 #include <sys/uio.h>
66
67 #include <sys/protosw.h>
68 #include <sys/socket.h>
69 #include <net/if.h>
70
71 #include <net/bpf.h>
72 #include <net/bpfdesc.h>
73
74 #include <sys/errno.h>
75
76 #include <netinet/in.h>
77 #include <netinet/if_ether.h>
78 #include <sys/kernel.h>
79
80 /*
81 * Older BSDs don't have kernel malloc.
82 */
83 #if BSD < 199103
84 extern bcopy();
85 static caddr_t bpf_alloc();
86 #include <net/bpf_compat.h>
87 #define BPF_BUFSIZE (MCLBYTES-8)
88 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
89 #else
90 #define BPF_BUFSIZE 4096
91 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
92 #endif
93
94 #define PRINET 26 /* interruptible */
95
96 /*
97 * The default read buffer size is patchable.
98 */
99 int bpf_bufsize = BPF_BUFSIZE;
100
101 /*
102 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
103 * bpf_dtab holds the descriptors, indexed by minor device #
104 */
105 struct bpf_if *bpf_iflist;
106 struct bpf_d bpf_dtab[NBPFILTER];
107
108 #if BSD >= 199207 || NetBSD0_9 >= 2
109 /*
110 * bpfilterattach() is called at boot time in new systems. We do
111 * nothing here since old systems will not call this.
112 */
113 /* ARGSUSED */
114 void
115 bpfilterattach(n)
116 int n;
117 {
118 }
119 #endif
120
121 static int bpf_allocbufs __P((struct bpf_d *));
123 static void bpf_freed __P((struct bpf_d *));
125 static void bpf_ifname __P((struct ifnet *, struct ifreq *));
127 static void bpf_mcopy __P((const void *, void *, u_int));
128 static int bpf_movein __P((struct uio *, int,
129 struct mbuf **, struct sockaddr *, int *));
130 static int bpf_setif __P((struct bpf_d *, struct ifreq *));
132 static inline void
133 bpf_wakeup __P((struct bpf_d *));
134 static void catchpacket __P((struct bpf_d *, u_char *, u_int,
135 u_int, void (*)(const void *, void *, u_int)));
136 static void reset_d __P((struct bpf_d *));
137
138 static int
139 bpf_movein(uio, linktype, mp, sockp, datlen)
140 register struct uio *uio;
141 int linktype, *datlen;
142 register struct mbuf **mp;
143 register struct sockaddr *sockp;
144 {
145 struct mbuf *m;
146 int error;
147 int len;
148 int hlen;
149
150 /*
151 * Build a sockaddr based on the data link layer type.
152 * We do this at this level because the ethernet header
153 * is copied directly into the data field of the sockaddr.
154 * In the case of SLIP, there is no header and the packet
155 * is forwarded as is.
156 * Also, we are careful to leave room at the front of the mbuf
157 * for the link level header.
158 */
159 switch (linktype) {
160
161 case DLT_SLIP:
162 sockp->sa_family = AF_INET;
163 hlen = 0;
164 break;
165
166 case DLT_PPP:
167 sockp->sa_family = AF_UNSPEC;
168 hlen = 0;
169 break;
170
171 case DLT_EN10MB:
172 sockp->sa_family = AF_UNSPEC;
173 /* XXX Would MAXLINKHDR be better? */
174 hlen = sizeof(struct ether_header);
175 break;
176
177 case DLT_FDDI:
178 sockp->sa_family = AF_UNSPEC;
179 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
180 hlen = 24;
181 break;
182
183 case DLT_NULL:
184 sockp->sa_family = AF_UNSPEC;
185 hlen = 0;
186 break;
187
188 default:
189 return (EIO);
190 }
191
192 len = uio->uio_resid;
193 *datlen = len - hlen;
194 if ((unsigned)len > MCLBYTES)
195 return (EIO);
196
197 MGET(m, M_WAIT, MT_DATA);
198 if (m == 0)
199 return (ENOBUFS);
200 if (len > MLEN) {
201 #if BSD >= 199103
202 MCLGET(m, M_WAIT);
203 if ((m->m_flags & M_EXT) == 0) {
204 #else
205 MCLGET(m);
206 if (m->m_len != MCLBYTES) {
207 #endif
208 error = ENOBUFS;
209 goto bad;
210 }
211 }
212 m->m_len = len;
213 *mp = m;
214 /*
215 * Make room for link header.
216 */
217 if (hlen != 0) {
218 m->m_len -= hlen;
219 #if BSD >= 199103
220 m->m_data += hlen; /* XXX */
221 #else
222 m->m_off += hlen;
223 #endif
224 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
225 if (error)
226 goto bad;
227 }
228 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
229 if (!error)
230 return (0);
231 bad:
232 m_freem(m);
233 return (error);
234 }
235
236 /*
237 * Attach file to the bpf interface, i.e. make d listen on bp.
238 * Must be called at splimp.
239 */
240 static void
241 bpf_attachd(d, bp)
242 struct bpf_d *d;
243 struct bpf_if *bp;
244 {
245 /*
246 * Point d at bp, and add d to the interface's list of listeners.
247 * Finally, point the driver's bpf cookie at the interface so
248 * it will divert packets to bpf.
249 */
250 d->bd_bif = bp;
251 d->bd_next = bp->bif_dlist;
252 bp->bif_dlist = d;
253
254 *bp->bif_driverp = bp;
255 }
256
257 /*
258 * Detach a file from its interface.
259 */
260 static void
261 bpf_detachd(d)
262 struct bpf_d *d;
263 {
264 struct bpf_d **p;
265 struct bpf_if *bp;
266
267 bp = d->bd_bif;
268 /*
269 * Check if this descriptor had requested promiscuous mode.
270 * If so, turn it off.
271 */
272 if (d->bd_promisc) {
273 d->bd_promisc = 0;
274 if (ifpromisc(bp->bif_ifp, 0))
275 /*
276 * Something is really wrong if we were able to put
277 * the driver into promiscuous mode, but can't
278 * take it out.
279 */
280 panic("bpf: ifpromisc failed");
281 }
282 /* Remove d from the interface's descriptor list. */
283 p = &bp->bif_dlist;
284 while (*p != d) {
285 p = &(*p)->bd_next;
286 if (*p == 0)
287 panic("bpf_detachd: descriptor not in list");
288 }
289 *p = (*p)->bd_next;
290 if (bp->bif_dlist == 0)
291 /*
292 * Let the driver know that there are no more listeners.
293 */
294 *d->bd_bif->bif_driverp = 0;
295 d->bd_bif = 0;
296 }
297
298
299 /*
300 * Mark a descriptor free by making it point to itself.
301 * This is probably cheaper than marking with a constant since
302 * the address should be in a register anyway.
303 */
304 #define D_ISFREE(d) ((d) == (d)->bd_next)
305 #define D_MARKFREE(d) ((d)->bd_next = (d))
306 #define D_MARKUSED(d) ((d)->bd_next = 0)
307
308 /*
309 * Open the bpf device. Returns ENXIO for an illegal minor device number,
310 * EBUSY if the file is already open by another process.
311 */
312 /* ARGSUSED */
313 int
314 bpfopen(dev, flag)
315 dev_t dev;
316 int flag;
317 {
318 register struct bpf_d *d;
319
320 if (minor(dev) >= NBPFILTER)
321 return (ENXIO);
322 /*
323 * Each minor can be opened by only one process. If the requested
324 * minor is in use, return EBUSY.
325 */
326 d = &bpf_dtab[minor(dev)];
327 if (!D_ISFREE(d))
328 return (EBUSY);
329
330 /* Mark the descriptor "in use" (bd_next = 0) and do most initialization. */
331 bzero((char *)d, sizeof(*d));
332 d->bd_bufsize = bpf_bufsize;
333
334 return (0);
335 }
336
337 /*
338 * Close the descriptor by detaching it from its interface,
339 * deallocating its buffers, and marking it free.
340 */
341 /* ARGSUSED */
342 int
343 bpfclose(dev, flag)
344 dev_t dev;
345 int flag;
346 {
347 register struct bpf_d *d = &bpf_dtab[minor(dev)];
348 register int s;
349
350 s = splimp();
351 if (d->bd_bif)
352 bpf_detachd(d);
353 splx(s);
354 bpf_freed(d);
355
356 return (0);
357 }
358
359 /*
360 * Support for SunOS, which does not have tsleep.
361 */
362 #if BSD < 199103
363 static
364 bpf_timeout(arg)
365 caddr_t arg;
366 {
367 struct bpf_d *d = (struct bpf_d *)arg;
368 d->bd_timedout = 1;
369 wakeup(arg);
370 }
371
372 #define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)
373
374 int
375 bpf_sleep(d)
376 register struct bpf_d *d;
377 {
378 register int rto = d->bd_rtout;
379 register int st;
380
381 if (rto != 0) {
382 d->bd_timedout = 0;
383 timeout(bpf_timeout, (caddr_t)d, rto);
384 }
385 st = sleep((caddr_t)d, PRINET|PCATCH);
386 if (rto != 0) {
387 if (d->bd_timedout == 0)
388 untimeout(bpf_timeout, (caddr_t)d);
389 else if (st == 0)
390 return EWOULDBLOCK;
391 }
392 return (st != 0) ? EINTR : 0;
393 }
394 #else
395 #define BPF_SLEEP tsleep
396 #endif
397
398 /*
399 * Rotate the packet buffers in descriptor d. Move the store buffer
400 * into the hold slot, and the free buffer into the store slot.
401 * Zero the length of the new store buffer.
402 */
403 #define ROTATE_BUFFERS(d) \
404 (d)->bd_hbuf = (d)->bd_sbuf; \
405 (d)->bd_hlen = (d)->bd_slen; \
406 (d)->bd_sbuf = (d)->bd_fbuf; \
407 (d)->bd_slen = 0; \
408 (d)->bd_fbuf = 0;
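/*
 * In summary: each descriptor cycles three buffers of bd_bufsize bytes.
 * Incoming packets are appended to the store buffer (bd_sbuf/bd_slen);
 * when it fills, it is rotated into the hold slot (bd_hbuf/bd_hlen) for
 * readers to drain; the buffer a reader has just drained becomes the
 * free buffer (bd_fbuf), ready to be rotated back in as the next store
 * buffer.
 */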
409 /*
410 * bpfread - read next chunk of packets from buffers
411 */
412 int
413 bpfread(dev, uio)
414 dev_t dev;
415 register struct uio *uio;
416 {
417 register struct bpf_d *d = &bpf_dtab[minor(dev)];
418 int error;
419 int s;
420
421 /*
422 * Restrict the application to use a buffer the same size as
423 * the kernel buffers.
424 */
425 if (uio->uio_resid != d->bd_bufsize)
426 return (EINVAL);
427
428 s = splimp();
429 /*
430 * If the hold buffer is empty, then do a timed sleep, which
431 * ends when the timeout expires or when enough packets
432 * have arrived to fill the store buffer.
433 */
434 while (d->bd_hbuf == 0) {
435 if (d->bd_immediate && d->bd_slen != 0) {
436 /*
437 * One or more packets arrived since the previous
438 * read or while we were asleep.
439 * Rotate the buffers and return what's here.
440 */
441 ROTATE_BUFFERS(d);
442 break;
443 }
444 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
445 d->bd_rtout);
446 if (error == EINTR || error == ERESTART) {
447 splx(s);
448 return (error);
449 }
450 if (error == EWOULDBLOCK) {
451 /*
452 * On a timeout, return what's in the buffer,
453 * which may be nothing. If there is something
454 * in the store buffer, we can rotate the buffers.
455 */
456 if (d->bd_hbuf)
457 /*
458 * We filled up the buffer in between
459 * getting the timeout and arriving
460 * here, so we don't need to rotate.
461 */
462 break;
463
464 if (d->bd_slen == 0) {
465 splx(s);
466 return (0);
467 }
468 ROTATE_BUFFERS(d);
469 break;
470 }
471 }
472 /*
473 * At this point, we know we have something in the hold slot.
474 */
475 splx(s);
476
477 /*
478 * Move data from hold buffer into user space.
479 * We know the entire buffer is transferred since
480 * we checked above that the read buffer is bpf_bufsize bytes.
481 */
482 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
483
484 s = splimp();
485 d->bd_fbuf = d->bd_hbuf;
486 d->bd_hbuf = 0;
487 d->bd_hlen = 0;
488 splx(s);
489
490 return (error);
491 }
492
493
494 /*
495 * If there are processes sleeping on this descriptor, wake them up.
496 */
497 static inline void
498 bpf_wakeup(d)
499 register struct bpf_d *d;
500 {
501 wakeup((caddr_t)d);
502 #if BSD >= 199103
503 selwakeup(&d->bd_sel);
504 /* XXX */
505 d->bd_sel.si_pid = 0;
506 #else
507 if (d->bd_selproc) {
508 selwakeup(d->bd_selproc, (int)d->bd_selcoll);
509 d->bd_selcoll = 0;
510 d->bd_selproc = 0;
511 }
512 #endif
513 }
514
515 int
516 bpfwrite(dev, uio)
517 dev_t dev;
518 struct uio *uio;
519 {
520 register struct bpf_d *d = &bpf_dtab[minor(dev)];
521 struct ifnet *ifp;
522 struct mbuf *m;
523 int error, s;
524 static struct sockaddr dst;
525 int datlen;
526
527 if (d->bd_bif == 0)
528 return (ENXIO);
529
530 ifp = d->bd_bif->bif_ifp;
531
532 if (uio->uio_resid == 0)
533 return (0);
534
535 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
536 if (error)
537 return (error);
538
539 if (datlen > ifp->if_mtu) {
540 	m_freem(m);	/* the oversized write is rejected; don't leak the mbuf */
 	return (EMSGSIZE);
 }
541
542 s = splnet();
543 #if BSD >= 199103
544 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
545 #else
546 error = (*ifp->if_output)(ifp, m, &dst);
547 #endif
548 splx(s);
549 /*
550 * The driver frees the mbuf.
551 */
552 return (error);
553 }
554
555 /*
556 * Reset a descriptor by flushing its packet buffer and clearing the
557 * receive and drop counts. Should be called at splimp.
558 */
559 static void
560 reset_d(d)
561 struct bpf_d *d;
562 {
563 if (d->bd_hbuf) {
564 /* Free the hold buffer. */
565 d->bd_fbuf = d->bd_hbuf;
566 d->bd_hbuf = 0;
567 }
568 d->bd_slen = 0;
569 d->bd_hlen = 0;
570 d->bd_rcount = 0;
571 d->bd_dcount = 0;
572 }
573
574 /*
575 * FIONREAD Check for read packet available.
576 * SIOCGIFADDR Get interface address - convenient hook to driver.
577 * BIOCGBLEN Get buffer len [for read()].
578 * BIOCSETF Set read filter.
579 * BIOCFLUSH Flush read packet buffer.
580 * BIOCPROMISC Put interface into promiscuous mode.
581 * BIOCGDLT Get link layer type.
582 * BIOCGETIF Get interface name.
583 * BIOCSETIF Set interface.
584 * BIOCSRTIMEOUT Set read timeout.
585 * BIOCGRTIMEOUT Get read timeout.
586 * BIOCGSTATS Get packet stats.
587 * BIOCIMMEDIATE Set immediate mode.
588 * BIOCVERSION Get filter language version.
589 */
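/*
 * Usage sketch (userland, for illustration only; the device path and
 * interface name are examples, not fixed by this driver):
 *
 *	fd = open("/dev/bpf0", O_RDONLY);
 *	strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	(attach to an interface)
 *	ioctl(fd, BIOCGBLEN, &blen);	(learn the kernel buffer size)
 *	ioctl(fd, BIOCPROMISC, 0);	(optionally go promiscuous)
 *	n = read(fd, buf, blen);	(must ask for exactly blen bytes)
 *
 * where buf points to blen bytes of storage; bpfread() below rejects any
 * other read size with EINVAL.
 */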
590 /* ARGSUSED */
591 int
592 bpfioctl(dev, cmd, addr, flag)
593 dev_t dev;
594 int cmd;
595 caddr_t addr;
596 int flag;
597 {
598 register struct bpf_d *d = &bpf_dtab[minor(dev)];
599 int s, error = 0;
600
601 switch (cmd) {
602
603 default:
604 error = EINVAL;
605 break;
606
607 /*
608 * Check for read packet available.
609 */
610 case FIONREAD:
611 {
612 int n;
613
614 s = splimp();
615 n = d->bd_slen;
616 if (d->bd_hbuf)
617 n += d->bd_hlen;
618 splx(s);
619
620 *(int *)addr = n;
621 break;
622 }
623
624 case SIOCGIFADDR:
625 {
626 struct ifnet *ifp;
627
628 if (d->bd_bif == 0)
629 error = EINVAL;
630 else {
631 ifp = d->bd_bif->bif_ifp;
632 error = (*ifp->if_ioctl)(ifp, cmd, addr);
633 }
634 break;
635 }
636
637 /*
638 * Get buffer len [for read()].
639 */
640 case BIOCGBLEN:
641 *(u_int *)addr = d->bd_bufsize;
642 break;
643
644 /*
645 * Set buffer length.
646 */
647 case BIOCSBLEN:
648 #if BSD < 199103
649 error = EINVAL;
650 #else
651 if (d->bd_bif != 0)
652 error = EINVAL;
653 else {
654 register u_int size = *(u_int *)addr;
655
656 if (size > BPF_MAXBUFSIZE)
657 *(u_int *)addr = size = BPF_MAXBUFSIZE;
658 else if (size < BPF_MINBUFSIZE)
659 *(u_int *)addr = size = BPF_MINBUFSIZE;
660 d->bd_bufsize = size;
661 }
662 #endif
663 break;
664
665 /*
666 * Set link layer read filter.
667 */
668 case BIOCSETF:
669 error = bpf_setf(d, (struct bpf_program *)addr);
670 break;
671
672 /*
673 * Flush read packet buffer.
674 */
675 case BIOCFLUSH:
676 s = splimp();
677 reset_d(d);
678 splx(s);
679 break;
680
681 /*
682 * Put interface into promiscuous mode.
683 */
684 case BIOCPROMISC:
685 if (d->bd_bif == 0) {
686 /*
687 * No interface attached yet.
688 */
689 error = EINVAL;
690 break;
691 }
692 s = splimp();
693 if (d->bd_promisc == 0) {
694 error = ifpromisc(d->bd_bif->bif_ifp, 1);
695 if (error == 0)
696 d->bd_promisc = 1;
697 }
698 splx(s);
699 break;
700
701 /*
702 * Get device parameters.
703 */
704 case BIOCGDLT:
705 if (d->bd_bif == 0)
706 error = EINVAL;
707 else
708 *(u_int *)addr = d->bd_bif->bif_dlt;
709 break;
710
711 /*
712 * Get interface name.
713 */
714 case BIOCGETIF:
715 if (d->bd_bif == 0)
716 error = EINVAL;
717 else
718 bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
719 break;
720
721 /*
722 * Set interface.
723 */
724 case BIOCSETIF:
725 error = bpf_setif(d, (struct ifreq *)addr);
726 break;
727
728 /*
729 * Set read timeout.
730 */
731 case BIOCSRTIMEOUT:
732 {
733 struct timeval *tv = (struct timeval *)addr;
734 u_long msec;
735
736 /* Compute number of milliseconds. */
737 msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
738 /* Scale milliseconds to ticks. Assume hard
739 clock has millisecond or greater resolution
740 (i.e. tick >= 1000). For 10ms hardclock,
741 tick/1000 = 10, so rtout<-msec/10. */
742 d->bd_rtout = msec / (tick / 1000);
743 break;
744 }
745
746 /*
747 * Get read timeout.
748 */
749 case BIOCGRTIMEOUT:
750 {
751 struct timeval *tv = (struct timeval *)addr;
752 u_long msec = d->bd_rtout;
753
754 msec *= tick / 1000;
755 tv->tv_sec = msec / 1000;
756 tv->tv_usec = (msec % 1000) * 1000;
757 break;
758 }
759
760 /*
761 * Get packet stats.
762 */
763 case BIOCGSTATS:
764 {
765 struct bpf_stat *bs = (struct bpf_stat *)addr;
766
767 bs->bs_recv = d->bd_rcount;
768 bs->bs_drop = d->bd_dcount;
769 break;
770 }
771
772 /*
773 * Set immediate mode.
774 */
775 case BIOCIMMEDIATE:
776 d->bd_immediate = *(u_int *)addr;
777 break;
778
779 case BIOCVERSION:
780 {
781 struct bpf_version *bv = (struct bpf_version *)addr;
782
783 bv->bv_major = BPF_MAJOR_VERSION;
784 bv->bv_minor = BPF_MINOR_VERSION;
785 break;
786 }
787 }
788 return (error);
789 }
790
791 /*
792 * Set d's packet filter program to fp. If this file already has a filter,
793 * free it and replace it. Returns EINVAL for bogus requests.
794 */
795 int
796 bpf_setf(d, fp)
797 struct bpf_d *d;
798 struct bpf_program *fp;
799 {
800 struct bpf_insn *fcode, *old;
801 u_int flen, size;
802 int s;
803
804 old = d->bd_filter;
805 if (fp->bf_insns == 0) {
806 if (fp->bf_len != 0)
807 return (EINVAL);
808 s = splimp();
809 d->bd_filter = 0;
810 reset_d(d);
811 splx(s);
812 if (old != 0)
813 free((caddr_t)old, M_DEVBUF);
814 return (0);
815 }
816 flen = fp->bf_len;
817 if (flen > BPF_MAXINSNS)
818 return (EINVAL);
819
820 size = flen * sizeof(*fp->bf_insns);
821 fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
822 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
823 bpf_validate(fcode, (int)flen)) {
824 s = splimp();
825 d->bd_filter = fcode;
826 reset_d(d);
827 splx(s);
828 if (old != 0)
829 free((caddr_t)old, M_DEVBUF);
830
831 return (0);
832 }
833 free((caddr_t)fcode, M_DEVBUF);
834 return (EINVAL);
835 }
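/*
 * Filter sketch (userland, for illustration): the simplest program that
 * BIOCSETF/bpf_setf() will accept returns a nonzero snapshot length for
 * every packet, i.e. "capture everything":
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * A return value of 0 from the filter rejects the packet; bpf_validate()
 * and bpf_filter() themselves live in net/bpf_filter.c.
 */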
836
837 /*
838 * Detach a file from its current interface (if attached at all) and attach
839 * to the interface indicated by the name stored in ifr.
840 * Return an errno or 0.
841 */
842 static int
843 bpf_setif(d, ifr)
844 struct bpf_d *d;
845 struct ifreq *ifr;
846 {
847 struct bpf_if *bp;
848 char *cp;
849 int unit, s, error;
850
851 /*
852 * Separate string into name part and unit number. Put a null
853 * byte at the end of the name part, and compute the number.
854 * If a unit number is unspecified, the default is 0,
855 * as initialized below. XXX This should be common code.
856 */
857 unit = 0;
858 cp = ifr->ifr_name;
859 cp[sizeof(ifr->ifr_name) - 1] = '\0';
860 while (*cp++) {
861 if (*cp >= '0' && *cp <= '9') {
862 unit = *cp - '0';
863 *cp++ = '\0';
864 while (*cp)
865 unit = 10 * unit + *cp++ - '0';
866 break;
867 }
868 }
869 /*
870 * Look through attached interfaces for the named one.
871 */
872 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
873 struct ifnet *ifp = bp->bif_ifp;
874
875 if (ifp == 0 || unit != ifp->if_unit
876 || strcmp(ifp->if_name, ifr->ifr_name) != 0)
877 continue;
878 /*
879 * We found the requested interface.
880 * If it's not up, return an error.
881 * Allocate the packet buffers if we need to.
882 * If we're already attached to requested interface,
883 * just flush the buffer.
884 */
885 if ((ifp->if_flags & IFF_UP) == 0)
886 return (ENETDOWN);
887
888 if (d->bd_sbuf == 0) {
889 error = bpf_allocbufs(d);
890 if (error != 0)
891 return (error);
892 }
893 s = splimp();
894 if (bp != d->bd_bif) {
895 if (d->bd_bif)
896 /*
897 * Detach if attached to something else.
898 */
899 bpf_detachd(d);
900
901 bpf_attachd(d, bp);
902 }
903 reset_d(d);
904 splx(s);
905 return (0);
906 }
907 /* Not found. */
908 return (ENXIO);
909 }
910
911 /*
912 * Convert an interface name plus unit number of an ifp to a single
913 * name which is returned in the ifr.
914 */
915 static void
916 bpf_ifname(ifp, ifr)
917 struct ifnet *ifp;
918 struct ifreq *ifr;
919 {
920 char *s = ifp->if_name;
921 char *d = ifr->ifr_name;
922
923 while (*d++ = *s++)
924 continue;
925 /* XXX Assume that unit number is less than 10. */
926 *d++ = ifp->if_unit + '0';
927 *d = '\0';
928 }
929
930 /*
931 * The new select interface passes down the proc pointer; the old select
932 * stubs had to grab it out of the user struct. This glue allows either case.
933 */
934 #if BSD >= 199103
935 #define bpf_select bpfselect
936 #else
937 int
938 bpfselect(dev, rw)
939 register dev_t dev;
940 int rw;
941 {
942 return (bpf_select(dev, rw, u.u_procp));
943 }
944 #endif
945
946 /*
947 * Support for select() system call
948 *
949 * Return true iff the specific operation will not block indefinitely.
950 * Otherwise, return false but make a note that a selwakeup() must be done.
951 */
952 int
953 bpf_select(dev, rw, p)
954 register dev_t dev;
955 int rw;
956 struct proc *p;
957 {
958 register struct bpf_d *d;
959 register int s;
960
961 if (rw != FREAD)
962 return (0);
963 /*
964 * An imitation of the FIONREAD ioctl code.
965 */
966 d = &bpf_dtab[minor(dev)];
967
968 s = splimp();
969 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
970 /*
971 * There is data waiting.
972 */
973 splx(s);
974 return (1);
975 }
976 #if BSD >= 199103
977 selrecord(p, &d->bd_sel);
978 #else
979 /*
980 * No data ready. If there's already a select() waiting on this
981 * minor device then this is a collision. This shouldn't happen
982 * because minors really should not be shared, but if a process
983 * forks while one of these is open, it is possible that both
984 * processes could select on the same descriptor.
985 */
986 if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
987 d->bd_selcoll = 1;
988 else
989 d->bd_selproc = p;
990 #endif
991 splx(s);
992 return (0);
993 }
994
995 /*
996 * Incoming linkage from device drivers. Process the packet pkt, of length
997 * pktlen, which is stored in a contiguous buffer. The packet is parsed
998 * by each process' filter, and if accepted, stashed into the corresponding
999 * buffer.
1000 */
1001 void
1002 bpf_tap(arg, pkt, pktlen)
1003 caddr_t arg;
1004 register u_char *pkt;
1005 register u_int pktlen;
1006 {
1007 struct bpf_if *bp;
1008 register struct bpf_d *d;
1009 register u_int slen;
1010 /*
1011 * Note that the ipl does not have to be raised at this point.
1012 * The only problem that could arise here is if two different
1013 * interfaces shared any data, which is not the case.
1014 */
1015 bp = (struct bpf_if *)arg;
1016 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1017 ++d->bd_rcount;
1018 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1019 if (slen != 0)
1020 catchpacket(d, pkt, pktlen, slen, bcopy);
1021 }
1022 }
1023
1024 /*
1025 * Copy data from an mbuf chain into a buffer. This code is derived
1026 * from m_copydata in sys/uipc_mbuf.c.
1027 */
1028 static void
1029 bpf_mcopy(src_arg, dst_arg, len)
1030 const void *src_arg;
1031 void *dst_arg;
1032 register u_int len;
1033 {
1034 register const struct mbuf *m;
1035 register u_int count;
1036 u_char *dst;
1037
1038 m = src_arg;
1039 dst = dst_arg;
1040 while (len > 0) {
1041 if (m == 0)
1042 panic("bpf_mcopy");
1043 count = min(m->m_len, len);
1044 bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
1045 m = m->m_next;
1046 dst += count;
1047 len -= count;
1048 }
1049 }
1050
1051 /*
1052 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1053 */
1054 void
1055 bpf_mtap(arg, m)
1056 caddr_t arg;
1057 struct mbuf *m;
1058 {
1059 struct bpf_if *bp = (struct bpf_if *)arg;
1060 struct bpf_d *d;
1061 u_int pktlen, slen;
1062 struct mbuf *m0;
1063
1064 pktlen = 0;
1065 for (m0 = m; m0 != 0; m0 = m0->m_next)
1066 pktlen += m0->m_len;
1067
1068 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1069 ++d->bd_rcount;
1070 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1071 if (slen != 0)
1072 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1073 }
1074 }
1075
1076 /*
1077 * Move the packet data from interface memory (pkt) into the
1078 * store buffer, waking up any pending reads if the buffer fills.
1079 * "cpfn" is the routine called to do the actual data
1080 * transfer. bcopy is passed in to copy contiguous chunks, while
1081 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1082 * pkt is really an mbuf.
1083 */
1084 static void
1085 catchpacket(d, pkt, pktlen, snaplen, cpfn)
1086 register struct bpf_d *d;
1087 register u_char *pkt;
1088 register u_int pktlen, snaplen;
1089 register void (*cpfn) __P((const void *, void *, u_int));
1090 {
1091 register struct bpf_hdr *hp;
1092 register int totlen, curlen;
1093 register int hdrlen = d->bd_bif->bif_hdrlen;
1094 /*
1095 * Figure out how many bytes to move. If the packet is
1096 * greater or equal to the snapshot length, transfer that
1097 * much. Otherwise, transfer the whole packet (unless
1098 * we hit the buffer size limit).
1099 */
1100 totlen = hdrlen + min(snaplen, pktlen);
1101 if (totlen > d->bd_bufsize)
1102 totlen = d->bd_bufsize;
1103
1104 /*
1105 * Round up the end of the previous packet to the next longword.
1106 */
1107 curlen = BPF_WORDALIGN(d->bd_slen);
1108 if (curlen + totlen > d->bd_bufsize) {
1109 /*
1110 * This packet will overflow the storage buffer.
1111 * Rotate the buffers if we can, then wakeup any
1112 * pending reads.
1113 */
1114 if (d->bd_fbuf == 0) {
1115 /*
1116 * We haven't completed the previous read yet,
1117 * so drop the packet.
1118 */
1119 ++d->bd_dcount;
1120 return;
1121 }
1122 ROTATE_BUFFERS(d);
1123 bpf_wakeup(d);
1124 curlen = 0;
1125 }
1126 else if (d->bd_immediate)
1127 /*
1128 * Immediate mode is set. A packet arrived so any
1129 * reads should be woken up.
1130 */
1131 bpf_wakeup(d);
1132
1133 /*
1134 * Append the bpf header.
1135 */
1136 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1137 #if BSD >= 199103
1138 microtime(&hp->bh_tstamp);
1139 #elif defined(sun)
1140 uniqtime(&hp->bh_tstamp);
1141 #else
1142 hp->bh_tstamp = time;
1143 #endif
1144 hp->bh_datalen = pktlen;
1145 hp->bh_hdrlen = hdrlen;
1146 /*
1147 * Copy the packet data into the store buffer and update its length.
1148 */
1149 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1150 d->bd_slen = curlen + totlen;
1151 }
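/*
 * Record layout note: each record written above is a struct bpf_hdr
 * followed, at offset bh_hdrlen, by bh_caplen bytes of packet data, and
 * the next record begins on a longword boundary.  A userland reader that
 * has just read "cc" bytes can therefore walk them roughly as:
 *
 *	p = buf;
 *	while (p < buf + cc) {
 *		hp = (struct bpf_hdr *)p;
 *		(packet data is at p + hp->bh_hdrlen, hp->bh_caplen bytes)
 *		p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
 *	}
 */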
1152
1153 /*
1154 * Allocate the packet buffers for a descriptor and zero their lengths.
1155 */
1156 static int
1157 bpf_allocbufs(d)
1158 register struct bpf_d *d;
1159 {
1160 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1161 if (d->bd_fbuf == 0)
1162 return (ENOBUFS);
1163
1164 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1165 if (d->bd_sbuf == 0) {
1166 free(d->bd_fbuf, M_DEVBUF);
1167 return (ENOBUFS);
1168 }
1169 d->bd_slen = 0;
1170 d->bd_hlen = 0;
1171 return (0);
1172 }
1173
1174 /*
1175 * Free buffers currently in use by a descriptor.
1176 * Called on close.
1177 */
1178 static void
1179 bpf_freed(d)
1180 register struct bpf_d *d;
1181 {
1182 /*
1183 * We don't need to lock out interrupts since this descriptor has
1184 * been detached from its interface and it hasn't yet been marked
1185 * free.
1186 */
1187 if (d->bd_sbuf != 0) {
1188 free(d->bd_sbuf, M_DEVBUF);
1189 if (d->bd_hbuf != 0)
1190 free(d->bd_hbuf, M_DEVBUF);
1191 if (d->bd_fbuf != 0)
1192 free(d->bd_fbuf, M_DEVBUF);
1193 }
1194 if (d->bd_filter)
1195 free((caddr_t)d->bd_filter, M_DEVBUF);
1196
1197 D_MARKFREE(d);
1198 }
1199
1200 /*
1201 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1202 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1203 * size of the link header (variable length headers not yet supported).
1204 */
1205 void
1206 bpfattach(driverp, ifp, dlt, hdrlen)
1207 caddr_t *driverp;
1208 struct ifnet *ifp;
1209 u_int dlt, hdrlen;
1210 {
1211 struct bpf_if *bp;
1212 int i;
1213 #if BSD < 199103
1214 static struct bpf_if bpf_ifs[NBPFILTER];
1215 static int bpfifno;
1216
1217 bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
1218 #else
1219 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
1220 #endif
1221 if (bp == 0)
1222 panic("bpfattach");
1223
1224 bp->bif_dlist = 0;
1225 bp->bif_driverp = (struct bpf_if **)driverp;
1226 bp->bif_ifp = ifp;
1227 bp->bif_dlt = dlt;
1228
1229 bp->bif_next = bpf_iflist;
1230 bpf_iflist = bp;
1231
1232 *bp->bif_driverp = 0;
1233
1234 /*
1235 * Compute the length of the bpf header. This is not necessarily
1236 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1237 * that the network layer header begins on a longword boundary (for
1238 * performance reasons and to alleviate alignment restrictions).
1239 */
1240 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
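	/*
	 * Worked example, assuming SIZEOF_BPF_HDR is 18 and 4-byte longs:
	 * for Ethernet (hdrlen = 14), BPF_WORDALIGN(14 + 18) = 32, so
	 * bif_hdrlen = 32 - 14 = 18.  The captured Ethernet header then
	 * occupies bytes 18-31 of each record and the IP header starts at
	 * byte 32, a longword boundary.
	 */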
1241
1242 /*
1243 * Mark all the descriptors free if this hasn't been done.
1244 */
1245 if (!D_ISFREE(&bpf_dtab[0]))
1246 for (i = 0; i < NBPFILTER; ++i)
1247 D_MARKFREE(&bpf_dtab[i]);
1248
1249 #if 0
1250 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1251 #endif
1252 }
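/*
 * Driver usage sketch (for illustration; "sc" is a hypothetical softc
 * with a caddr_t sc_bpf member used as the bpf cookie).  An Ethernet
 * driver attaches once at configuration time:
 *
 *	bpfattach(&sc->sc_bpf, &sc->sc_if, DLT_EN10MB,
 *	    sizeof(struct ether_header));
 *
 * and then taps each packet it sends or receives:
 *
 *	if (sc->sc_bpf)
 *		bpf_tap(sc->sc_bpf, buf, len);
 *
 * bpf_mtap() is the equivalent entry point when the packet is still in
 * an mbuf chain.
 */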
1253
1254 #if BSD >= 199103
1255 /* XXX This routine belongs in net/if.c. */
1256 /*
1257 * Set/clear promiscuous mode on interface ifp based on the truth value
1258 * of pswitch. The calls are reference counted so that only the first
1259 * "on" request actually has an effect, as does the final "off" request.
1260 * Results are undefined if the "off" and "on" requests are not matched.
1261 */
1262 int
1263 ifpromisc(ifp, pswitch)
1264 struct ifnet *ifp;
1265 int pswitch;
1266 {
1267 struct ifreq ifr;
1268 /*
1269 * If the device is not configured up, we cannot put it in
1270 * promiscuous mode.
1271 */
1272 if ((ifp->if_flags & IFF_UP) == 0)
1273 return (ENETDOWN);
1274
1275 if (pswitch) {
1276 if (ifp->if_pcount++ != 0)
1277 return (0);
1278 ifp->if_flags |= IFF_PROMISC;
1279 } else {
1280 if (--ifp->if_pcount > 0)
1281 return (0);
1282 ifp->if_flags &= ~IFF_PROMISC;
1283 }
1284 ifr.ifr_flags = ifp->if_flags;
1285 return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
1286 }
1287 #endif
1288
1289 #if BSD < 199103
1290 /*
1291 * Allocate some memory for bpf. This is temporary SunOS support, and
1292 * is admittedly a hack.
1293 * If resources are unavailable, return 0.
1294 */
1295 static caddr_t
1296 bpf_alloc(size, canwait)
1297 register int size;
1298 register int canwait;
1299 {
1300 register struct mbuf *m;
1301
1302 if ((unsigned)size > (MCLBYTES-8))
1303 return 0;
1304
1305 MGET(m, canwait, MT_DATA);
1306 if (m == 0)
1307 return 0;
1308 if ((unsigned)size > (MLEN-8)) {
1309 MCLGET(m);
1310 if (m->m_len != MCLBYTES) {
1311 m_freem(m);
1312 return 0;
1313 }
1314 }
1315 *mtod(m, struct mbuf **) = m;
1316 return mtod(m, caddr_t) + 8;
1317 }
1318 #endif
1319