/*-
 * Copyright (c) 1991-1993 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	7.4 (Berkeley) 6/17/91
 *
 * from: Header: bpf.c,v 1.3 93/12/11 02:52:18 mccanne Exp
 * $Id: bpf.c,v 1.11 1994/01/25 06:10:08 deraadt Exp $
 */

#include "bpfilter.h"

#if (NBPFILTER > 0)

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/map.h>
#include <sys/select.h>

#include <sys/file.h>
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include "sl.h"
#include "ppp.h"

#ifndef BPF_BUFSIZE
#define BPF_BUFSIZE	NBPG
#endif

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
struct bpf_if	*bpf_iflist;
struct bpf_d	bpf_dtab[NBPFILTER];

static void	bpf_ifname();
static void	catchpacket();
static void	bpf_freed();
static int	bpf_setif();
static int	bpf_allocbufs();

void
bpfilterattach(n)
	int n;
{
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d)	((d) == (d)->bd_next)
#define D_MARKFREE(d)	((d)->bd_next = (d))
#define D_MARKUSED(d)	((d)->bd_next = 0)

/*
 * Open the bpf device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "in use" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
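/*
 * Note that ROTATE_BUFFERS expands to several statements with no
 * enclosing braces, so it must only be invoked at statement level
 * inside a braced block, never as the lone body of an if or loop.
 */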
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (ioflag & IO_NDELAY) {
			if (d->bd_slen == 0) {
				splx(s);
				return (EWOULDBLOCK);
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets arrived either since the
			 * previous read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = tsleep((caddr_t)d, PWAIT | PCATCH, "bpf", d->bd_rtout);
		if (error != 0) {
			if (error == EWOULDBLOCK) {
				/*
				 * On a timeout, return what's in the buffer,
				 * which may be nothing.  If there is something
				 * in the store buffer, we can do a rotation.
				 */
				if (d->bd_hbuf)
					/*
					 * We filled up the buffer in between
					 * getting the timeout and arriving
					 * here, so we don't need to rotate.
					 */
					break;

				if (d->bd_slen == 0) {
					splx(s);
					return (0);
				}
				ROTATE_BUFFERS(d);
				break;
			}
			splx(s);
			return (error);
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
#if (BSD > 199103) || defined(__NetBSD__)
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_if *bp = bpf_dtab[minor(dev)].bd_bif;
	register struct ifnet *ifp;
	register struct mbuf *m;
	register u_int len, hlen;
	register int error, s;
	struct sockaddr dst;

	if (bp == 0)
		return (ENXIO);

	/*
	 * Build a sockaddr based on the data link layer type.
	 * The AF_UNSPEC kludge allows us to hand the link level
	 * header to the driver via the sockaddr.  This isn't
	 * very clean.  It would be better if AF_UNSPEC meant that
	 * the driver shouldn't bother with encapsulation (i.e., the
	 * link header is already in the mbuf).  The code here is
	 * structured this way, then things are kludged back before
	 * calling if_output.
	 *
	 * NOTE: When adding new link layers make sure the driver supports
	 * AF_UNSPEC and that the link header can fit in the sa_data
	 * field of a sockaddr.
	 */
	switch (bp->bif_dlt) {

#if NSL > 0
	case DLT_SLIP:
		dst.sa_family = AF_INET;
		hlen = 0;
		break;
#endif

#if NPPP > 0
	case DLT_PPP:
		dst.sa_family = AF_UNSPEC;
		hlen = 0;
		break;
#endif

	case DLT_EN10MB:
		dst.sa_family = AF_UNSPEC;
		hlen = 14;
		break;

	case DLT_FDDI:
		dst.sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	case DLT_NULL:
		dst.sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}
	ifp = bp->bif_ifp;
	len = uio->uio_resid;
	/*
	 * If we didn't get enough for the link level header, or we
	 * exceed the interface's mtu, return an error.
	 */
	if (len < hlen || len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	/*
	 * XXX Avoid complicated buffer chaining ---
	 * bail if it won't fit in a single mbuf.
	 */
	if (len > MCLBYTES)
		return (EMSGSIZE);

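	/* Allocate a packet header mbuf, adding a cluster if the frame
	   won't fit in the mbuf's own data area. */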
	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	/*
	 * Move the whole packet, including the data link header,
	 * into the mbuf.  Then, copy the link header back out of the
	 * packet into the sockaddr.  Finally, strip the link header
	 * from the front of the mbuf.
	 */
	error = uiomove(mtod(m, caddr_t), len, uio);
	if (error) {
		m_freem(m);
		return (error);
	}
	if (hlen > 0) {
		bcopy(mtod(m, caddr_t), dst.sa_data, hlen);
		m->m_data += hlen;
		len -= hlen;
	}
	m->m_pkthdr.len = m->m_len = len;
	s = splnet();
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len [for read()].
 *  BIOCSETF		Set link layer read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	case FIONBIO:
	case FIOASYNC:
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

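			/* Bytes available include the store buffer plus
			   any completed hold buffer. */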
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout <- msec/10. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

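	/* Copy the filter program in from user space and validate it
	   before installing it on the descriptor. */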
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (fcode == 0)
		return (ENOMEM);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s, error;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If a unit number is unspecified, the default is 0,
	 * as initialized above.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		continue;
	/* XXX Assume that unit number is less than 10. */
	d[-1] = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * Support for select() system call
 * Inspired by the code in tty.c for the same purpose.
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfselect(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
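	/* Readable if the hold buffer has data or, in immediate mode,
	   if anything has accumulated in the store buffer. */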
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
#if defined(__NetBSD__)
	selrecord(p, &d->bd_sel);
#else
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;

#endif
	splx(s);
	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is if two different
	 * interfaces shared any data, which is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src, dst, len)
	u_char *src;
	u_char *dst;
	register int len;
{
	register struct mbuf *m = (struct mbuf *)src;
	register unsigned count;

	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = MIN(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

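	/* Sum the lengths of the mbufs in the chain to get the total
	   packet length. */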
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up pending reads if the buffer fills up or
 * immediate mode is set.  "cpfn" is the routine called to do the
 * actual data transfer.  bcopy is passed in to copy contiguous chunks,
 * while bpf_mcopy is passed in to copy mbuf chains.  In the latter
 * case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)();
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + MIN(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&hp->bh_tstamp);
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;

	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == 0) {
		printf("bpf: no buffers in attach\n");
		return;
	}
	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

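	/* No listeners yet: leave the driver's bpf pointer null so no
	   packets are diverted until a descriptor attaches. */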
	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

#if 0
	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
#endif
}

/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	struct ifreq ifr;
	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	ifr.ifr_flags = ifp->if_flags;
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
}

#endif /* NBPFILTER > 0 */