/*-
 * Copyright (c) 1990-1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)bpf.c	7.5 (Berkeley) 7/15/91
 *	$Id: bpf.c,v 1.5.4.5 1993/12/03 03:35:05 mycroft Exp $
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/dir.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include <sys/select.h>

#include <sys/file.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/tty.h>
#include <sys/uio.h>

#include <sys/protosw.h>
#include <sys/socket.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

#include <machine/cpu.h>

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet.
 * bpf_dtab holds the descriptors, indexed by minor device #.
 */
struct bpf_if	*bpf_iflist;
struct bpf_d	bpf_dtab[NBPFILTER];

static void	bpf_ifname();
static void	catchpacket();
static void	bpf_freed();
static int	bpf_setif();
static int	bpf_initd();
static int	bpf_allocbufs();
void
bpfilterattach(n)
	int n;
{
}

static int
bpf_movein(uio, linktype, mp, sockp)
	register struct uio *uio;
	int linktype;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGET(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;

		if (linktype == DLT_EN10MB) {
			/*
			 * ether_output() routine does a htons() on the
			 * type field, so here we make sure it's in host
			 * order.
			 */
			struct ether_header *eh;
			eh = (struct ether_header *)sockp->sa_data;
			eh->ether_type = ntohs(eh->ether_type);
		}
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*d->bd_bif->bif_driverp = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open the bpf device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/* Mark "in use" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flag)
	dev_t dev;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = tsleep((caddr_t)d, PRINET|PCATCH, "bpf_sleep", 0);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
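/*
 * Note that ROTATE_BUFFERS expands to several statements rather than a
 * single compound statement, so it must not be used as the unbraced body
 * of an if or else.  It also assumes the free buffer (bd_fbuf) is valid,
 * which the callers below guarantee before rotating.
 */
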
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio)
	dev_t dev;
	register struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets arrived since the previous
			 * read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
				  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bd_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}
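
/*
 * Layout of the buffer returned by bpfread(): each record is a struct
 * bpf_hdr followed by bh_caplen bytes of packet data, padded so that the
 * next record begins on a word boundary.  A reader would typically walk
 * it like this (userland sketch, not part of this file):
 *
 *	u_char *p = buf;
 *	while (p < buf + nread) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		... packet data is at p + bh->bh_hdrlen, bh->bh_caplen bytes ...
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */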


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
#if (BSD > 199103) || defined(__NetBSD__)
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

int
bpfwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_resid > ifp->if_mtu)
		return (EMSGSIZE);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst);
	if (error)
		return (error);

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len.
 *  BIOCSETF		Set link layer read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flag)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout<-msec/10. */
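			/*
			 * For example, assuming a 100Hz hardclock
			 * (tick == 10000): tv = { 1, 500000 } gives
			 * msec = 1500, so bd_rtout becomes
			 * 1500 / 10 = 150 ticks, i.e. 1.5 seconds.
			 */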
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = (msec % 1000) * 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
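
/*
 * For reference, a minimal filter program (userland sketch, not used in
 * this file): the single instruction
 *
 *	struct bpf_insn insns[] = { BPF_STMT(BPF_RET|BPF_K, (u_int)-1) };
 *
 * returns (u_int)-1 for every packet, i.e. "accept the whole packet";
 * bf_len would be 1.  Returning 0 instead would reject the packet.
 */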

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	char *cp;
	int unit, s, error;

	/*
	 * Separate string into name part and unit number.  Put a null
	 * byte at the end of the name part, and compute the number.
	 * If a unit number is unspecified, the default is 0, as
	 * initialized just below.  XXX This should be common code.
	 */
	unit = 0;
	cp = ifr->ifr_name;
	cp[sizeof(ifr->ifr_name) - 1] = '\0';
	while (*cp++) {
		if (*cp >= '0' && *cp <= '9') {
			unit = *cp - '0';
			*cp++ = '\0';
			while (*cp)
				unit = 10 * unit + *cp++ - '0';
			break;
		}
	}
	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || unit != ifp->if_unit
		    || strcmp(ifp->if_name, ifr->ifr_name) != 0)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to the requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		continue;
	/* XXX Assume that unit number is less than 10. */
	/* Overwrite the copied null byte with the unit digit. */
	d[-1] = ifp->if_unit + '0';
	*d = '\0';
}

/*
 * The new select interface passes down the proc pointer; the old select
 * stubs had to grab it out of the user struct.  This glue allows either case.
 */
#if BSD >= 199103
#define bpf_select bpfselect
#else
int
bpfselect(dev, rw)
	register dev_t dev;
	int rw;
{
	return (bpf_select(dev, rw, u.u_procp));
}
#endif

/*
 * Support for select() system call
 * Inspired by the code in tty.c for the same purpose.
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
#if defined(__NetBSD__)
	selrecord(p, &d->bd_sel);
#else
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;
#endif
	splx(s);
	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data, which is not the case.
	 */
	bp = (struct bpf_if *)arg;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src, dst, len)
	u_char *src;
	u_char *dst;
	register int len;
{
	register struct mbuf *m = (struct mbuf *)src;
	register unsigned count;

	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = MIN(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

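	/*
	 * A buffer length of 0 below tells bpf_filter() that the packet
	 * pointer is really an mbuf chain rather than a contiguous buffer;
	 * bpf_mcopy() is handed to catchpacket() for the same reason.
	 */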
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up pending reads if the buffer fills up or if
 * immediate mode is set.  "cpfn" is the routine called to do the
 * actual data transfer.  bcopy is passed in to copy contiguous chunks,
 * while bpf_mcopy is passed in to copy mbuf chains.  In the latter
 * case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)();
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + MIN(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
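	/*
	 * The record laid down here is what bpfread() later hands to the
	 * user: a struct bpf_hdr followed by bh_caplen bytes of packet
	 * data, with BPF_WORDALIGN (above) keeping each header on a
	 * longword boundary.
	 */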
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Allocate the packet buffers for a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(driverp, ifp, dlt, hdrlen)
	caddr_t *driverp;
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;
#if BSD < 199103
	static struct bpf_if bpf_ifs[NBPFILTER];
	static int bpfifno;

	bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
#else
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
#endif
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = (struct bpf_if **)driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
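	/*
	 * For example, on an Ethernet interface hdrlen is 14; assuming
	 * SIZEOF_BPF_HDR is 18 (as on typical 32-bit systems), this gives
	 * BPF_WORDALIGN(32) - 14 = 18, so the captured link header starts
	 * at offset 18 in each record and the network layer header at
	 * offset 32, which is longword aligned.
	 */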

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!D_ISFREE(&bpf_dtab[0]))
		for (i = 0; i < NBPFILTER; ++i)
			D_MARKFREE(&bpf_dtab[i]);

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

#if BSD >= 199103
/* XXX This routine belongs in net/if.c. */
/*
 * Set/clear promiscuous mode on interface ifp based on the truth value
 * of pswitch.  The calls are reference counted so that only the first
 * "on" request actually has an effect, as does the final "off" request.
 * Results are undefined if the "off" and "on" requests are not matched.
 */
int
ifpromisc(ifp, pswitch)
	struct ifnet *ifp;
	int pswitch;
{
	struct ifreq ifr;

	/*
	 * If the device is not configured up, we cannot put it in
	 * promiscuous mode.
	 */
	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (pswitch) {
		if (ifp->if_pcount++ != 0)
			return (0);
		ifp->if_flags |= IFF_PROMISC;
	} else {
		if (--ifp->if_pcount > 0)
			return (0);
		ifp->if_flags &= ~IFF_PROMISC;
	}
	ifr.ifr_flags = ifp->if_flags;
	return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
}
#endif

#if BSD < 199103
/*
 * Allocate some memory for bpf.  This is temporary SunOS support, and
 * is admittedly a hack.
 * If resources are unavailable, return 0.
 */
static caddr_t
bpf_alloc(size, canwait)
	register int size;
	register int canwait;
{
	register struct mbuf *m;

	if ((unsigned)size > (MCLBYTES-8))
		return (0);

	MGET(m, canwait, MT_DATA);
	if (m == 0)
		return (0);
	if ((unsigned)size > (MLEN-8)) {
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
			m_freem(m);
			return (0);
		}
	}
	*mtod(m, struct mbuf **) = m;
	return (mtod(m, caddr_t) + 8);
}
#endif /* BSD < 199103 */
#endif /* NBPFILTER > 0 */