1 /*	$NetBSD: bpf.c,v 1.23 1995/09/27 18:30:37 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 1990, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from the Stanford/CMU enet packet filter,
8 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
9 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10 * Berkeley Laboratory.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
41 */
42
43 #include "bpfilter.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/mbuf.h>
48 #include <sys/buf.h>
49 #include <sys/time.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52 #include <sys/ioctl.h>
53 #include <sys/map.h>
54
55 #include <sys/file.h>
56 #if defined(sparc) && BSD < 199103
57 #include <sys/stream.h>
58 #endif
59 #include <sys/tty.h>
60 #include <sys/uio.h>
61
62 #include <sys/protosw.h>
63 #include <sys/socket.h>
64 #include <net/if.h>
65
66 #include <net/bpf.h>
67 #include <net/bpfdesc.h>
68
69 #include <sys/errno.h>
70
71 #include <netinet/in.h>
72 #include <netinet/if_arc.h>
73 #include <netinet/if_ether.h>
74 #include <sys/kernel.h>
75
76 /*
77 * Older BSDs don't have kernel malloc.
78 */
79 #if BSD < 199103
80 extern bcopy();
81 static caddr_t bpf_alloc();
82 #include <net/bpf_compat.h>
83 #define BPF_BUFSIZE (MCLBYTES-8)
84 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
85 #else
86 #define BPF_BUFSIZE 4096
87 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
88 #endif
89
90 #define PRINET 26 /* interruptible */
91
92 /*
93 * The default read buffer size is patchable.
94 */
95 int bpf_bufsize = BPF_BUFSIZE;
96
97 /*
98 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
99 * bpf_dtab holds the descriptors, indexed by minor device #
100 */
101 struct bpf_if *bpf_iflist;
102 struct bpf_d bpf_dtab[NBPFILTER];
103
104 #if BSD >= 199207 || NetBSD0_9 >= 2
105 /*
106 * bpfilterattach() is called at boot time in new systems. We do
107 * nothing here since old systems will not call this.
108 */
109 /* ARGSUSED */
110 void
111 bpfilterattach(n)
112 int n;
113 {
114 }
115 #endif
116
117 static int bpf_allocbufs __P((struct bpf_d *));
119 static void bpf_freed __P((struct bpf_d *));
121 static void bpf_ifname __P((struct ifnet *, struct ifreq *));
123 static void bpf_mcopy __P((const void *, void *, size_t));
124 static int bpf_movein __P((struct uio *, int,
125 struct mbuf **, struct sockaddr *));
126 static int bpf_setif __P((struct bpf_d *, struct ifreq *));
128 static __inline void
129 bpf_wakeup __P((struct bpf_d *));
130 static void catchpacket __P((struct bpf_d *, u_char *, size_t,
131 size_t, void (*)(const void *, void *, size_t)));
132 static void reset_d __P((struct bpf_d *));
133
134 static int
135 bpf_movein(uio, linktype, mp, sockp)
136 register struct uio *uio;
137 int linktype;
138 register struct mbuf **mp;
139 register struct sockaddr *sockp;
140 {
141 struct mbuf *m;
142 int error;
143 int len;
144 int hlen;
145
146 /*
147 * Build a sockaddr based on the data link layer type.
148 * We do this at this level because the ethernet header
149 * is copied directly into the data field of the sockaddr.
150 * In the case of SLIP, there is no header and the packet
151 * is forwarded as is.
152 * Also, we are careful to leave room at the front of the mbuf
153 * for the link level header.
154 */
155 switch (linktype) {
156
157 case DLT_SLIP:
158 sockp->sa_family = AF_INET;
159 hlen = 0;
160 break;
161
162 case DLT_PPP:
163 sockp->sa_family = AF_UNSPEC;
164 hlen = 0;
165 break;
166
167 case DLT_EN10MB:
168 sockp->sa_family = AF_UNSPEC;
169 /* XXX Would MAXLINKHDR be better? */
170 hlen = sizeof(struct ether_header);
171 break;
172
173 case DLT_ARCNET:
174 sockp->sa_family = AF_UNSPEC;
175 hlen = ARC_HDRLEN;
176 break;
177
178 case DLT_FDDI:
179 sockp->sa_family = AF_UNSPEC;
180 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
181 hlen = 24;
182 break;
183
184 case DLT_NULL:
185 sockp->sa_family = AF_UNSPEC;
186 hlen = 0;
187 break;
188
189 default:
190 return (EIO);
191 }
192
193 len = uio->uio_resid;
194 if ((unsigned)len > MCLBYTES)
195 return (EIO);
196
197 MGETHDR(m, M_WAIT, MT_DATA);
198 if (m == 0)
199 return (ENOBUFS);
200 m->m_pkthdr.rcvif = 0;
201 m->m_pkthdr.len = len - hlen;
202
203 if (len > MHLEN) {
204 #if BSD >= 199103
205 MCLGET(m, M_WAIT);
206 if ((m->m_flags & M_EXT) == 0) {
207 #else
208 MCLGET(m);
209 if (m->m_len != MCLBYTES) {
210 #endif
211 error = ENOBUFS;
212 goto bad;
213 }
214 }
215 m->m_len = len;
216 *mp = m;
217 /*
218 * Make room for link header.
219 */
220 if (hlen != 0) {
221 m->m_len -= hlen;
222 #if BSD >= 199103
223 m->m_data += hlen; /* XXX */
224 #else
225 m->m_off += hlen;
226 #endif
227 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
228 if (error)
229 goto bad;
230 }
231 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
232 if (!error)
233 return (0);
234 bad:
235 m_freem(m);
236 return (error);
237 }
238
239 /*
240 * Attach file to the bpf interface, i.e. make d listen on bp.
241 * Must be called at splimp.
242 */
243 static void
244 bpf_attachd(d, bp)
245 struct bpf_d *d;
246 struct bpf_if *bp;
247 {
248 /*
249 * Point d at bp, and add d to the interface's list of listeners.
250 * Finally, point the driver's bpf cookie at the interface so
251 * it will divert packets to bpf.
252 */
253 d->bd_bif = bp;
254 d->bd_next = bp->bif_dlist;
255 bp->bif_dlist = d;
256
257 *bp->bif_driverp = bp;
258 }
259
260 /*
261 * Detach a file from its interface.
262 */
263 static void
264 bpf_detachd(d)
265 struct bpf_d *d;
266 {
267 struct bpf_d **p;
268 struct bpf_if *bp;
269
270 bp = d->bd_bif;
271 /*
272 * Check if this descriptor had requested promiscuous mode.
273 * If so, turn it off.
274 */
275 if (d->bd_promisc) {
276 int error;
277
278 d->bd_promisc = 0;
279 error = ifpromisc(bp->bif_ifp, 0);
280 if (error && error != EINVAL)
281 /*
282 * Something is really wrong if we were able to put
283 * the driver into promiscuous mode, but can't
284 * take it out.
285 */
286 panic("bpf: ifpromisc failed");
287 }
288 /* Remove d from the interface's descriptor list. */
289 p = &bp->bif_dlist;
290 while (*p != d) {
291 p = &(*p)->bd_next;
292 if (*p == 0)
293 panic("bpf_detachd: descriptor not in list");
294 }
295 *p = (*p)->bd_next;
296 if (bp->bif_dlist == 0)
297 /*
298 * Let the driver know that there are no more listeners.
299 */
300 *d->bd_bif->bif_driverp = 0;
301 d->bd_bif = 0;
302 }
303
304
305 /*
306 * Mark a descriptor free by making it point to itself.
307 * This is probably cheaper than marking with a constant since
308 * the address should be in a register anyway.
309 */
310 #define D_ISFREE(d) ((d) == (d)->bd_next)
311 #define D_MARKFREE(d) ((d)->bd_next = (d))
312 #define D_MARKUSED(d) ((d)->bd_next = 0)
313
314 /*
315 * Open ethernet device. Returns ENXIO for illegal minor device number,
316 * EBUSY if file is open by another process.
317 */
318 /* ARGSUSED */
319 int
320 bpfopen(dev, flag)
321 dev_t dev;
322 int flag;
323 {
324 register struct bpf_d *d;
325
326 if (minor(dev) >= NBPFILTER)
327 return (ENXIO);
328 /*
329 * Each minor can be opened by only one process. If the requested
330 * minor is in use, return EBUSY.
331 */
332 d = &bpf_dtab[minor(dev)];
333 if (!D_ISFREE(d))
334 return (EBUSY);
335
336 	/* Mark "in use" and do most initialization. */
337 bzero((char *)d, sizeof(*d));
338 d->bd_bufsize = bpf_bufsize;
339 d->bd_sig = SIGIO;
340
341 return (0);
342 }
343
344 /*
345 * Close the descriptor by detaching it from its interface,
346 * deallocating its buffers, and marking it free.
347 */
348 /* ARGSUSED */
349 int
350 bpfclose(dev, flag)
351 dev_t dev;
352 int flag;
353 {
354 register struct bpf_d *d = &bpf_dtab[minor(dev)];
355 register int s;
356
357 s = splimp();
358 if (d->bd_bif)
359 bpf_detachd(d);
360 splx(s);
361 bpf_freed(d);
362
363 return (0);
364 }
365
366 /*
367 * Support for SunOS, which does not have tsleep.
368 */
369 #if BSD < 199103
370 static
371 bpf_timeout(arg)
372 caddr_t arg;
373 {
374 struct bpf_d *d = (struct bpf_d *)arg;
375 d->bd_timedout = 1;
376 wakeup(arg);
377 }
378
379 #define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)
380
381 int
382 bpf_sleep(d)
383 register struct bpf_d *d;
384 {
385 register int rto = d->bd_rtout;
386 register int st;
387
388 if (rto != 0) {
389 d->bd_timedout = 0;
390 timeout(bpf_timeout, (caddr_t)d, rto);
391 }
392 st = sleep((caddr_t)d, PRINET|PCATCH);
393 if (rto != 0) {
394 if (d->bd_timedout == 0)
395 untimeout(bpf_timeout, (caddr_t)d);
396 else if (st == 0)
397 return EWOULDBLOCK;
398 }
399 return (st != 0) ? EINTR : 0;
400 }
401 #else
402 #define BPF_SLEEP tsleep
403 #endif
404
405 /*
406 * Rotate the packet buffers in descriptor d. Move the store buffer
407 * into the hold slot, and the free buffer into the store slot.
408 * Zero the length of the new store buffer.
409 */
410 #define ROTATE_BUFFERS(d) \
411 (d)->bd_hbuf = (d)->bd_sbuf; \
412 (d)->bd_hlen = (d)->bd_slen; \
413 (d)->bd_sbuf = (d)->bd_fbuf; \
414 (d)->bd_slen = 0; \
415 (d)->bd_fbuf = 0;
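/*
 * Note that ROTATE_BUFFERS assumes bd_fbuf is non-null.  A descriptor owns
 * at most three buffers (store, hold, free), and whenever the hold slot is
 * empty the free slot is occupied; callers therefore either test bd_fbuf
 * explicitly (catchpacket) or rotate only while bd_hbuf is empty (bpfread).
 */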
416 /*
417 * bpfread - read next chunk of packets from buffers
418 */
419 int
420 bpfread(dev, uio)
421 dev_t dev;
422 register struct uio *uio;
423 {
424 register struct bpf_d *d = &bpf_dtab[minor(dev)];
425 int error;
426 int s;
427
428 /*
429 * Restrict application to use a buffer the same size as
430 	 * the kernel buffers.
431 */
432 if (uio->uio_resid != d->bd_bufsize)
433 return (EINVAL);
434
435 s = splimp();
436 /*
437 * If the hold buffer is empty, then do a timed sleep, which
438 * ends when the timeout expires or when enough packets
439 * have arrived to fill the store buffer.
440 */
441 while (d->bd_hbuf == 0) {
442 if (d->bd_immediate && d->bd_slen != 0) {
443 /*
444 	 * One or more packets arrived since the previous
445 	 * read, or while we were asleep.
446 * Rotate the buffers and return what's here.
447 */
448 ROTATE_BUFFERS(d);
449 break;
450 }
451 if (d->bd_rtout != -1)
452 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
453 d->bd_rtout);
454 else
455 error = EWOULDBLOCK; /* User requested non-blocking I/O */
456 if (error == EINTR || error == ERESTART) {
457 splx(s);
458 return (error);
459 }
460 if (error == EWOULDBLOCK) {
461 /*
462 * On a timeout, return what's in the buffer,
463 * which may be nothing. If there is something
464 * in the store buffer, we can rotate the buffers.
465 */
466 if (d->bd_hbuf)
467 /*
468 * We filled up the buffer in between
469 * getting the timeout and arriving
470 * here, so we don't need to rotate.
471 */
472 break;
473
474 if (d->bd_slen == 0) {
475 splx(s);
476 return (0);
477 }
478 ROTATE_BUFFERS(d);
479 break;
480 }
481 }
482 /*
483 * At this point, we know we have something in the hold slot.
484 */
485 splx(s);
486
487 /*
488 * Move data from hold buffer into user space.
489 * We know the entire buffer is transferred since
490 * we checked above that the read buffer is bpf_bufsize bytes.
491 */
492 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
493
494 s = splimp();
495 d->bd_fbuf = d->bd_hbuf;
496 d->bd_hbuf = 0;
497 d->bd_hlen = 0;
498 splx(s);
499
500 return (error);
501 }
502
503
504 /*
505 * If there are processes sleeping on this descriptor, wake them up.
506 */
507 static __inline void
508 bpf_wakeup(d)
509 register struct bpf_d *d;
510 {
511 struct proc *p;
512
513 wakeup((caddr_t)d);
514 if (d->bd_async && d->bd_sig)
515 if (d->bd_pgid > 0)
516 gsignal (d->bd_pgid, d->bd_sig);
517 else if (p = pfind (-d->bd_pgid))
518 psignal (p, d->bd_sig);
519
520 #if BSD >= 199103
521 selwakeup(&d->bd_sel);
522 /* XXX */
523 d->bd_sel.si_pid = 0;
524 #else
525 if (d->bd_selproc) {
526 selwakeup(d->bd_selproc, (int)d->bd_selcoll);
527 d->bd_selcoll = 0;
528 d->bd_selproc = 0;
529 }
530 #endif
531 }
532
533 int
534 bpfwrite(dev, uio)
535 dev_t dev;
536 struct uio *uio;
537 {
538 register struct bpf_d *d = &bpf_dtab[minor(dev)];
539 struct ifnet *ifp;
540 struct mbuf *m;
541 int error, s;
542 static struct sockaddr dst;
543
544 if (d->bd_bif == 0)
545 return (ENXIO);
546
547 ifp = d->bd_bif->bif_ifp;
548
549 if (uio->uio_resid == 0)
550 return (0);
551
552 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst);
553 if (error)
554 return (error);
555
556 	if (m->m_pkthdr.len > ifp->if_mtu) {
		m_freem(m);	/* otherwise the mbuf leaks; the driver never sees it */
557 		return (EMSGSIZE);
	}
558
559 s = splsoftnet();
560 #if BSD >= 199103
561 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
562 #else
563 error = (*ifp->if_output)(ifp, m, &dst);
564 #endif
565 splx(s);
566 /*
567 * The driver frees the mbuf.
568 */
569 return (error);
570 }
571
572 /*
573 * Reset a descriptor by flushing its packet buffer and clearing the
574 * receive and drop counts. Should be called at splimp.
575 */
576 static void
577 reset_d(d)
578 struct bpf_d *d;
579 {
580 if (d->bd_hbuf) {
581 /* Free the hold buffer. */
582 d->bd_fbuf = d->bd_hbuf;
583 d->bd_hbuf = 0;
584 }
585 d->bd_slen = 0;
586 d->bd_hlen = 0;
587 d->bd_rcount = 0;
588 d->bd_dcount = 0;
589 }
590
591 /*
592 * FIONREAD Check for read packet available.
593 * BIOCGBLEN Get buffer len [for read()].
594 * BIOCSETF Set ethernet read filter.
595 * BIOCFLUSH Flush read packet buffer.
596 * BIOCPROMISC Put interface into promiscuous mode.
597 * BIOCGDLT Get link layer type.
598 * BIOCGETIF Get interface name.
599 * BIOCSETIF Set interface.
600 * BIOCSRTIMEOUT Set read timeout.
601 * BIOCGRTIMEOUT Get read timeout.
602 * BIOCGSTATS Get packet stats.
603 * BIOCIMMEDIATE Set immediate mode.
604 * BIOCVERSION Get filter language version.
605 */
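/*
 * A minimal sketch of how a userland reader typically drives these ioctls
 * together with read(2).  The device path, the interface name "le0" and
 * the lack of error checking are illustrative only:
 *
 *	struct ifreq ifr;
 *	u_int buflen;
 *	char *buf, *p;
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *
 *	strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		attach to the interface
 *	ioctl(fd, BIOCGBLEN, &buflen);		read() must use exactly this size
 *	buf = malloc(buflen);
 *	for (;;) {
 *		int cc = read(fd, buf, buflen);	returns one or more packets
 *		for (p = buf; p < buf + cc; ) {
 *			struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *			... packet data starts at p + bh->bh_hdrlen,
 *			    bh->bh_caplen bytes were captured ...
 *			p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *		}
 *	}
 */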
606 /* ARGSUSED */
607 int
608 bpfioctl(dev, cmd, addr, flag)
609 dev_t dev;
610 u_long cmd;
611 caddr_t addr;
612 int flag;
613 {
614 register struct bpf_d *d = &bpf_dtab[minor(dev)];
615 int s, error = 0;
616
617 switch (cmd) {
618
619 default:
620 error = EINVAL;
621 break;
622
623 /*
624 * Check for read packet available.
625 */
626 case FIONREAD:
627 {
628 int n;
629
630 s = splimp();
631 n = d->bd_slen;
632 if (d->bd_hbuf)
633 n += d->bd_hlen;
634 splx(s);
635
636 *(int *)addr = n;
637 break;
638 }
639
640 /*
641 * Get buffer len [for read()].
642 */
643 case BIOCGBLEN:
644 *(u_int *)addr = d->bd_bufsize;
645 break;
646
647 /*
648 * Set buffer length.
649 */
650 case BIOCSBLEN:
651 #if BSD < 199103
652 error = EINVAL;
653 #else
654 if (d->bd_bif != 0)
655 error = EINVAL;
656 else {
657 register u_int size = *(u_int *)addr;
658
659 if (size > BPF_MAXBUFSIZE)
660 *(u_int *)addr = size = BPF_MAXBUFSIZE;
661 else if (size < BPF_MINBUFSIZE)
662 *(u_int *)addr = size = BPF_MINBUFSIZE;
663 d->bd_bufsize = size;
664 }
665 #endif
666 break;
667
668 /*
669 * Set link layer read filter.
670 */
671 case BIOCSETF:
672 error = bpf_setf(d, (struct bpf_program *)addr);
673 break;
674
675 /*
676 * Flush read packet buffer.
677 */
678 case BIOCFLUSH:
679 s = splimp();
680 reset_d(d);
681 splx(s);
682 break;
683
684 /*
685 * Put interface into promiscuous mode.
686 */
687 case BIOCPROMISC:
688 if (d->bd_bif == 0) {
689 /*
690 * No interface attached yet.
691 */
692 error = EINVAL;
693 break;
694 }
695 s = splimp();
696 if (d->bd_promisc == 0) {
697 error = ifpromisc(d->bd_bif->bif_ifp, 1);
698 if (error == 0)
699 d->bd_promisc = 1;
700 }
701 splx(s);
702 break;
703
704 /*
705 * Get device parameters.
706 */
707 case BIOCGDLT:
708 if (d->bd_bif == 0)
709 error = EINVAL;
710 else
711 *(u_int *)addr = d->bd_bif->bif_dlt;
712 break;
713
714 /*
715 	 * Get interface name.
716 */
717 case BIOCGETIF:
718 if (d->bd_bif == 0)
719 error = EINVAL;
720 else
721 bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
722 break;
723
724 /*
725 * Set interface.
726 */
727 case BIOCSETIF:
728 error = bpf_setif(d, (struct ifreq *)addr);
729 break;
730
731 /*
732 * Set read timeout.
733 */
734 case BIOCSRTIMEOUT:
735 {
736 struct timeval *tv = (struct timeval *)addr;
737
738 /* Compute number of ticks. */
739 d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
740 break;
741 }
742
743 /*
744 * Get read timeout.
745 */
746 case BIOCGRTIMEOUT:
747 {
748 struct timeval *tv = (struct timeval *)addr;
749
750 tv->tv_sec = d->bd_rtout / hz;
751 tv->tv_usec = (d->bd_rtout % hz) * tick;
752 break;
753 }
754
755 /*
756 * Get packet stats.
757 */
758 case BIOCGSTATS:
759 {
760 struct bpf_stat *bs = (struct bpf_stat *)addr;
761
762 bs->bs_recv = d->bd_rcount;
763 bs->bs_drop = d->bd_dcount;
764 break;
765 }
766
767 /*
768 * Set immediate mode.
769 */
770 case BIOCIMMEDIATE:
771 d->bd_immediate = *(u_int *)addr;
772 break;
773
774 case BIOCVERSION:
775 {
776 struct bpf_version *bv = (struct bpf_version *)addr;
777
778 bv->bv_major = BPF_MAJOR_VERSION;
779 bv->bv_minor = BPF_MINOR_VERSION;
780 break;
781 }
782
783
784 case FIONBIO: /* Non-blocking I/O */
785 if (*(int *)addr)
786 d->bd_rtout = -1;
787 else
788 d->bd_rtout = 0;
789 break;
790
791 case FIOASYNC: /* Send signal on receive packets */
792 d->bd_async = *(int *)addr;
793 break;
794
795 /*
796 * N.B. ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing
797 * the equivalent of a TIOCSPGRP and hence end up here. *However*
798 * TIOCSPGRP's arg is a process group if it's positive and a process
799 * id if it's negative. This is exactly the opposite of what the
800 * other two functions want! Therefore there is code in ioctl and
801 * fcntl to negate the arg before calling here.
802 */
803 case TIOCSPGRP: /* Process or group to send signals to */
804 d->bd_pgid = *(int *)addr;
805 break;
806
807 case TIOCGPGRP:
808 *(int *)addr = d->bd_pgid;
809 break;
810
811 case BIOCSRSIG: /* Set receive signal */
812 {
813 u_int sig;
814
815 sig = *(u_int *)addr;
816
817 if (sig >= NSIG)
818 error = EINVAL;
819 else
820 d->bd_sig = sig;
821 break;
822 }
823 case BIOCGRSIG:
824 *(u_int *)addr = d->bd_sig;
825 break;
826 }
827 return (error);
828 }
829
830 /*
831 * Set d's packet filter program to fp. If this file already has a filter,
832 * free it and replace it. Returns EINVAL for bogus requests.
833 */
834 int
835 bpf_setf(d, fp)
836 struct bpf_d *d;
837 struct bpf_program *fp;
838 {
839 struct bpf_insn *fcode, *old;
840 u_int flen, size;
841 int s;
842
843 old = d->bd_filter;
844 if (fp->bf_insns == 0) {
845 if (fp->bf_len != 0)
846 return (EINVAL);
847 s = splimp();
848 d->bd_filter = 0;
849 reset_d(d);
850 splx(s);
851 if (old != 0)
852 free((caddr_t)old, M_DEVBUF);
853 return (0);
854 }
855 flen = fp->bf_len;
856 if (flen > BPF_MAXINSNS)
857 return (EINVAL);
858
859 size = flen * sizeof(*fp->bf_insns);
860 fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
861 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
862 bpf_validate(fcode, (int)flen)) {
863 s = splimp();
864 d->bd_filter = fcode;
865 reset_d(d);
866 splx(s);
867 if (old != 0)
868 free((caddr_t)old, M_DEVBUF);
869
870 return (0);
871 }
872 free((caddr_t)fcode, M_DEVBUF);
873 return (EINVAL);
874 }
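/*
 * For reference, the program handed to BIOCSETF (and thus to bpf_setf()
 * above) is an array of struct bpf_insn plus a count.  A hedged userland
 * sketch that accepts only IP over ethernet and snaps each packet to 68
 * bytes; the error handling is illustrative only:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD+BPF_H+BPF_ABS, 12),		load ether type
 *		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ETHERTYPE_IP, 0, 1),
 *		BPF_STMT(BPF_RET+BPF_K, 68),			accept, snap to 68
 *		BPF_STMT(BPF_RET+BPF_K, 0),			reject
 *	};
 *	struct bpf_program prog = {
 *		sizeof(insns) / sizeof(insns[0]),
 *		insns
 *	};
 *	if (ioctl(fd, BIOCSETF, &prog) < 0)
 *		err(1, "BIOCSETF");
 */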
875
876 /*
877 * Detach a file from its current interface (if attached at all) and attach
878 * to the interface indicated by the name stored in ifr.
879 * Return an errno or 0.
880 */
881 static int
882 bpf_setif(d, ifr)
883 struct bpf_d *d;
884 struct ifreq *ifr;
885 {
886 struct bpf_if *bp;
887 char *cp;
888 int unit, s, error;
889
890 /*
891 * Separate string into name part and unit number. Put a null
892 * byte at the end of the name part, and compute the number.
893 	 * If a unit number is unspecified, the default is 0,
894 * as initialized above. XXX This should be common code.
895 */
896 unit = 0;
897 cp = ifr->ifr_name;
898 cp[sizeof(ifr->ifr_name) - 1] = '\0';
899 while (*cp++) {
900 if (*cp >= '0' && *cp <= '9') {
901 unit = *cp - '0';
902 *cp++ = '\0';
903 while (*cp)
904 unit = 10 * unit + *cp++ - '0';
905 break;
906 }
907 }
908 /*
909 * Look through attached interfaces for the named one.
910 */
911 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
912 struct ifnet *ifp = bp->bif_ifp;
913
914 if (ifp == 0 || unit != ifp->if_unit
915 || strcmp(ifp->if_name, ifr->ifr_name) != 0)
916 continue;
917 /*
918 * We found the requested interface.
919 * If it's not up, return an error.
920 * Allocate the packet buffers if we need to.
921 * If we're already attached to requested interface,
922 * just flush the buffer.
923 */
924 if ((ifp->if_flags & IFF_UP) == 0)
925 return (ENETDOWN);
926
927 if (d->bd_sbuf == 0) {
928 error = bpf_allocbufs(d);
929 if (error != 0)
930 return (error);
931 }
932 s = splimp();
933 if (bp != d->bd_bif) {
934 if (d->bd_bif)
935 /*
936 * Detach if attached to something else.
937 */
938 bpf_detachd(d);
939
940 bpf_attachd(d, bp);
941 }
942 reset_d(d);
943 splx(s);
944 return (0);
945 }
946 /* Not found. */
947 return (ENXIO);
948 }
949
950 /*
951 * Convert an interface name plus unit number of an ifp to a single
952 * name which is returned in the ifr.
953 */
954 static void
955 bpf_ifname(ifp, ifr)
956 struct ifnet *ifp;
957 struct ifreq *ifr;
958 {
959 char *s = ifp->if_name;
960 char *d = ifr->ifr_name;
961
962 while (*d++ = *s++)
963 continue;
964 /* XXX Assume that unit number is less than 10. */
965 *d++ = ifp->if_unit + '0';
966 *d = '\0';
967 }
968
969 /*
970 * The new select interface passes down the proc pointer; the old select
971 * stubs had to grab it out of the user struct. This glue allows either case.
972 */
973 #if BSD >= 199103
974 #define bpf_select bpfselect
975 #else
976 int
977 bpfselect(dev, rw)
978 register dev_t dev;
979 int rw;
980 {
981 return (bpf_select(dev, rw, u.u_procp));
982 }
983 #endif
984
985 /*
986 * Support for select() system call
987 *
988 * Return true iff the specific operation will not block indefinitely.
989 * Otherwise, return false but make a note that a selwakeup() must be done.
990 */
991 int
992 bpf_select(dev, rw, p)
993 register dev_t dev;
994 int rw;
995 struct proc *p;
996 {
997 register struct bpf_d *d;
998 register int s;
999
1000 if (rw != FREAD)
1001 return (0);
1002 /*
1003 * An imitation of the FIONREAD ioctl code.
1004 */
1005 d = &bpf_dtab[minor(dev)];
1006
1007 s = splimp();
1008 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
1009 /*
1010 * There is data waiting.
1011 */
1012 splx(s);
1013 return (1);
1014 }
1015 #if BSD >= 199103
1016 selrecord(p, &d->bd_sel);
1017 #else
1018 /*
1019 * No data ready. If there's already a select() waiting on this
1020 * minor device then this is a collision. This shouldn't happen
1021 * because minors really should not be shared, but if a process
1022 * forks while one of these is open, it is possible that both
1023 * processes could select on the same descriptor.
1024 */
1025 if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
1026 d->bd_selcoll = 1;
1027 else
1028 d->bd_selproc = p;
1029 #endif
1030 splx(s);
1031 return (0);
1032 }
1033
1034 /*
1035 * Incoming linkage from device drivers. Process the packet pkt, of length
1036 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1037 * by each process' filter, and if accepted, stashed into the corresponding
1038 * buffer.
1039 */
1040 void
1041 bpf_tap(arg, pkt, pktlen)
1042 caddr_t arg;
1043 register u_char *pkt;
1044 register u_int pktlen;
1045 {
1046 struct bpf_if *bp;
1047 register struct bpf_d *d;
1048 register size_t slen;
1049 /*
1050 * Note that the ipl does not have to be raised at this point.
1051 	 * The only problem that could arise here would be if two different
1052 	 * interfaces shared any data, which is not the case.
1053 */
1054 bp = (struct bpf_if *)arg;
1055 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1056 ++d->bd_rcount;
1057 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1058 if (slen != 0)
1059 catchpacket(d, pkt, pktlen, slen, bcopy);
1060 }
1061 }
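/*
 * A hedged sketch of the corresponding call from a driver's receive path,
 * assuming the driver passed &ifp->if_bpf to bpfattach() below so that the
 * cookie lives in ifp->if_bpf:
 *
 *	#if NBPFILTER > 0
 *		if (ifp->if_bpf)
 *			bpf_mtap(ifp->if_bpf, m);
 *	#endif
 *
 * The test against zero is what makes the cookie useful: bpf_attachd() and
 * bpf_detachd() set and clear it as listeners come and go, so an interface
 * with no listeners pays only for the comparison.
 */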
1062
1063 /*
1064 * Copy data from an mbuf chain into a buffer. This code is derived
1065 * from m_copydata in sys/uipc_mbuf.c.
1066 */
1067 static void
1068 bpf_mcopy(src_arg, dst_arg, len)
1069 const void *src_arg;
1070 void *dst_arg;
1071 register size_t len;
1072 {
1073 register const struct mbuf *m;
1074 register u_int count;
1075 u_char *dst;
1076
1077 m = src_arg;
1078 dst = dst_arg;
1079 while (len > 0) {
1080 if (m == 0)
1081 panic("bpf_mcopy");
1082 count = min(m->m_len, len);
1083 bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
1084 m = m->m_next;
1085 dst += count;
1086 len -= count;
1087 }
1088 }
1089
1090 /*
1091 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1092 */
1093 void
1094 bpf_mtap(arg, m)
1095 caddr_t arg;
1096 struct mbuf *m;
1097 {
1098 struct bpf_if *bp = (struct bpf_if *)arg;
1099 struct bpf_d *d;
1100 size_t pktlen, slen;
1101 struct mbuf *m0;
1102
1103 pktlen = 0;
1104 for (m0 = m; m0 != 0; m0 = m0->m_next)
1105 pktlen += m0->m_len;
1106
1107 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1108 ++d->bd_rcount;
1109 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1110 if (slen != 0)
1111 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1112 }
1113 }
1114
1115 /*
1116 * Move the packet data from interface memory (pkt) into the
1117 * store buffer. Return 1 if it's time to wakeup a listener (buffer full),
1118 * otherwise 0. "copy" is the routine called to do the actual data
1119 * transfer. bcopy is passed in to copy contiguous chunks, while
1120 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1121 * pkt is really an mbuf.
1122 */
1123 static void
1124 catchpacket(d, pkt, pktlen, snaplen, cpfn)
1125 register struct bpf_d *d;
1126 register u_char *pkt;
1127 register size_t pktlen, snaplen;
1128 register void (*cpfn) __P((const void *, void *, size_t));
1129 {
1130 register struct bpf_hdr *hp;
1131 register int totlen, curlen;
1132 register int hdrlen = d->bd_bif->bif_hdrlen;
1133 /*
1134 * Figure out how many bytes to move. If the packet is
1135 * greater or equal to the snapshot length, transfer that
1136 * much. Otherwise, transfer the whole packet (unless
1137 * we hit the buffer size limit).
1138 */
1139 totlen = hdrlen + min(snaplen, pktlen);
1140 if (totlen > d->bd_bufsize)
1141 totlen = d->bd_bufsize;
1142
1143 /*
1144 * Round up the end of the previous packet to the next longword.
1145 */
1146 curlen = BPF_WORDALIGN(d->bd_slen);
1147 if (curlen + totlen > d->bd_bufsize) {
1148 /*
1149 * This packet will overflow the storage buffer.
1150 * Rotate the buffers if we can, then wakeup any
1151 * pending reads.
1152 */
1153 if (d->bd_fbuf == 0) {
1154 /*
1155 * We haven't completed the previous read yet,
1156 * so drop the packet.
1157 */
1158 ++d->bd_dcount;
1159 return;
1160 }
1161 ROTATE_BUFFERS(d);
1162 bpf_wakeup(d);
1163 curlen = 0;
1164 }
1165 else if (d->bd_immediate)
1166 /*
1167 * Immediate mode is set. A packet arrived so any
1168 * reads should be woken up.
1169 */
1170 bpf_wakeup(d);
1171
1172 /*
1173 * Append the bpf header.
1174 */
1175 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1176 #if BSD >= 199103
1177 microtime(&hp->bh_tstamp);
1178 #elif defined(sun)
1179 uniqtime(&hp->bh_tstamp);
1180 #else
1181 hp->bh_tstamp = time;
1182 #endif
1183 hp->bh_datalen = pktlen;
1184 hp->bh_hdrlen = hdrlen;
1185 /*
1186 * Copy the packet data into the store buffer and update its length.
1187 */
1188 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1189 d->bd_slen = curlen + totlen;
1190 }
1191
1192 /*
1193 * Initialize all nonzero fields of a descriptor.
1194 */
1195 static int
1196 bpf_allocbufs(d)
1197 register struct bpf_d *d;
1198 {
1199 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1200 if (d->bd_fbuf == 0)
1201 return (ENOBUFS);
1202
1203 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1204 if (d->bd_sbuf == 0) {
1205 free(d->bd_fbuf, M_DEVBUF);
1206 return (ENOBUFS);
1207 }
1208 d->bd_slen = 0;
1209 d->bd_hlen = 0;
1210 return (0);
1211 }
1212
1213 /*
1214 * Free buffers currently in use by a descriptor.
1215 * Called on close.
1216 */
1217 static void
1218 bpf_freed(d)
1219 register struct bpf_d *d;
1220 {
1221 /*
1222 * We don't need to lock out interrupts since this descriptor has
1223 	 * been detached from its interface and has not yet been marked
1224 * free.
1225 */
1226 if (d->bd_sbuf != 0) {
1227 free(d->bd_sbuf, M_DEVBUF);
1228 if (d->bd_hbuf != 0)
1229 free(d->bd_hbuf, M_DEVBUF);
1230 if (d->bd_fbuf != 0)
1231 free(d->bd_fbuf, M_DEVBUF);
1232 }
1233 if (d->bd_filter)
1234 free((caddr_t)d->bd_filter, M_DEVBUF);
1235
1236 D_MARKFREE(d);
1237 }
1238
1239 /*
1240 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1241 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1242 * size of the link header (variable length headers not yet supported).
1243 */
1244 void
1245 bpfattach(driverp, ifp, dlt, hdrlen)
1246 caddr_t *driverp;
1247 struct ifnet *ifp;
1248 u_int dlt, hdrlen;
1249 {
1250 struct bpf_if *bp;
1251 int i;
1252 #if BSD < 199103
1253 static struct bpf_if bpf_ifs[NBPFILTER];
1254 static int bpfifno;
1255
1256 bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
1257 #else
1258 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
1259 #endif
1260 if (bp == 0)
1261 panic("bpfattach");
1262
1263 bp->bif_dlist = 0;
1264 bp->bif_driverp = (struct bpf_if **)driverp;
1265 bp->bif_ifp = ifp;
1266 bp->bif_dlt = dlt;
1267
1268 bp->bif_next = bpf_iflist;
1269 bpf_iflist = bp;
1270
1271 *bp->bif_driverp = 0;
1272
1273 /*
1274 * Compute the length of the bpf header. This is not necessarily
1275 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1276 * that the network layer header begins on a longword boundary (for
1277 * performance reasons and to alleviate alignment restrictions).
1278 */
1279 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
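	/*
	 * Worked example, assuming a 14-byte ethernet header and an 18-byte
	 * struct bpf_hdr: BPF_WORDALIGN(14 + 18) is 32, so bif_hdrlen is
	 * 32 - 14 = 18.  catchpacket() then stores 18 bytes of bpf header
	 * followed by the 14-byte link header, leaving the network layer
	 * header at offset 32, which is longword aligned.
	 */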
1280
1281 /*
1282 * Mark all the descriptors free if this hasn't been done.
1283 */
1284 if (!D_ISFREE(&bpf_dtab[0]))
1285 for (i = 0; i < NBPFILTER; ++i)
1286 D_MARKFREE(&bpf_dtab[i]);
1287
1288 #if 0
1289 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1290 #endif
1291 }
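/*
 * A hedged sketch of the matching call from an ethernet driver's attach
 * routine, assuming the per-interface cookie is ifp->if_bpf:
 *
 *	#if NBPFILTER > 0
 *		bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB,
 *		    sizeof(struct ether_header));
 *	#endif
 */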
1292
1293 #if BSD >= 199103
1294 /* XXX This routine belongs in net/if.c. */
1295 /*
1296 * Set/clear promiscuous mode on interface ifp based on the truth value
1297 * of pswitch. The calls are reference counted so that only the first
1298 * "on" request actually has an effect, as does the final "off" request.
1299 * Results are undefined if the "off" and "on" requests are not matched.
1300 */
1301 int
1302 ifpromisc(ifp, pswitch)
1303 struct ifnet *ifp;
1304 int pswitch;
1305 {
1306 struct ifreq ifr;
1307
1308 if (pswitch) {
1309 /*
1310 * If the device is not configured up, we cannot put it in
1311 * promiscuous mode.
1312 */
1313 if ((ifp->if_flags & IFF_UP) == 0)
1314 return (ENETDOWN);
1315 if (ifp->if_pcount++ != 0)
1316 return (0);
1317 ifp->if_flags |= IFF_PROMISC;
1318 } else {
1319 if (--ifp->if_pcount > 0)
1320 return (0);
1321 ifp->if_flags &= ~IFF_PROMISC;
1322 /*
1323 * If the device is not configured up, we should not need to
1324 * turn off promiscuous mode (device should have turned it
1325 * off when interface went down; and will look at IFF_PROMISC
1326 * again next time interface comes up).
1327 */
1328 if ((ifp->if_flags & IFF_UP) == 0)
1329 return (0);
1330 }
1331 ifr.ifr_flags = ifp->if_flags;
1332 return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
1333 }
1334 #endif
1335
1336 #if BSD < 199103
1337 /*
1338 * Allocate some memory for bpf. This is temporary SunOS support, and
1339 * is admittedly a hack.
1340 	 * If resources are unavailable, return 0.
1341 */
1342 static caddr_t
1343 bpf_alloc(size, canwait)
1344 register int size;
1345 register int canwait;
1346 {
1347 register struct mbuf *m;
1348
1349 if ((unsigned)size > (MCLBYTES-8))
1350 return 0;
1351
1352 MGET(m, canwait, MT_DATA);
1353 if (m == 0)
1354 return 0;
1355 if ((unsigned)size > (MLEN-8)) {
1356 MCLGET(m);
1357 if (m->m_len != MCLBYTES) {
1358 m_freem(m);
1359 return 0;
1360 }
1361 }
1362 *mtod(m, struct mbuf **) = m;
1363 return mtod(m, caddr_t) + 8;
1364 }
1365 #endif
1366