1 /* $NetBSD: bpf.c,v 1.28 1996/05/22 13:41:54 mycroft Exp $ */
2
3 /*
4 * Copyright (c) 1990, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from the Stanford/CMU enet packet filter,
8 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
9 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10 * Berkeley Laboratory.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
41 */
42
43 #include "bpfilter.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/mbuf.h>
48 #include <sys/buf.h>
49 #include <sys/time.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52 #include <sys/ioctl.h>
53 #include <sys/map.h>
54 #include <sys/conf.h>
55
56 #include <sys/file.h>
57 #if defined(sparc) && BSD < 199103
58 #include <sys/stream.h>
59 #endif
60 #include <sys/tty.h>
61 #include <sys/uio.h>
62
63 #include <sys/protosw.h>
64 #include <sys/socket.h>
65 #include <net/if.h>
66
67 #include <net/bpf.h>
68 #include <net/bpfdesc.h>
69
70 #include <sys/errno.h>
71
72 #include <netinet/in.h>
73 #include <netinet/if_arc.h>
74 #include <netinet/if_ether.h>
75 #include <sys/kernel.h>
76
77 /*
78 * Older BSDs don't have kernel malloc.
79 */
80 #if BSD < 199103
81 extern bcopy();
82 static caddr_t bpf_alloc();
83 #include <net/bpf_compat.h>
84 #define BPF_BUFSIZE (MCLBYTES-8)
85 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
86 #else
87 #define BPF_BUFSIZE 4096
88 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
89 #endif
90
91 #define PRINET 26 /* interruptible */
92
93 /*
94 * The default read buffer size is patchable.
95 */
96 int bpf_bufsize = BPF_BUFSIZE;
97
98 /*
99 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
100 * bpf_dtab holds the descriptors, indexed by minor device #
101 */
102 struct bpf_if *bpf_iflist;
103 struct bpf_d bpf_dtab[NBPFILTER];
104
105 #if BSD >= 199207 || NetBSD0_9 >= 2
106 /*
107 * bpfilterattach() is called at boot time in new systems. We do
108 * nothing here since old systems will not call this.
109 */
110 /* ARGSUSED */
111 void
112 bpfilterattach(n)
113 int n;
114 {
115 }
116 #endif
117
118 static int bpf_allocbufs __P((struct bpf_d *));
120 static void bpf_freed __P((struct bpf_d *));
122 static void bpf_ifname __P((struct ifnet *, struct ifreq *));
124 static void bpf_mcopy __P((const void *, void *, size_t));
125 static int bpf_movein __P((struct uio *, int,
126 struct mbuf **, struct sockaddr *));
127 static void bpf_attachd __P((struct bpf_d *, struct bpf_if *));
128 static void bpf_detachd __P((struct bpf_d *));
129 static int bpf_setif __P((struct bpf_d *, struct ifreq *));
130 #if BSD >= 199103
131 int bpfselect __P((dev_t, int, struct proc *));
132 #endif
133 static __inline void
134 bpf_wakeup __P((struct bpf_d *));
135 static void catchpacket __P((struct bpf_d *, u_char *, size_t, size_t,
136 void (*)(const void *, void *, size_t)));
137 static void reset_d __P((struct bpf_d *));
138
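/*
 * Copy a packet from the user's uio into a freshly allocated mbuf and
 * build a link-level sockaddr for the interface output routine.  On
 * success the caller owns the returned mbuf; on failure it is freed here.
 */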
139 static int
140 bpf_movein(uio, linktype, mp, sockp)
141 register struct uio *uio;
142 int linktype;
143 register struct mbuf **mp;
144 register struct sockaddr *sockp;
145 {
146 struct mbuf *m;
147 int error;
148 int len;
149 int hlen;
150
151 /*
152 * Build a sockaddr based on the data link layer type.
153 * We do this at this level because the ethernet header
154 * is copied directly into the data field of the sockaddr.
155 * In the case of SLIP, there is no header and the packet
156 * is forwarded as is.
157 * Also, we are careful to leave room at the front of the mbuf
158 * for the link level header.
159 */
160 switch (linktype) {
161
162 case DLT_SLIP:
163 sockp->sa_family = AF_INET;
164 hlen = 0;
165 break;
166
167 case DLT_PPP:
168 sockp->sa_family = AF_UNSPEC;
169 hlen = 0;
170 break;
171
172 case DLT_EN10MB:
173 sockp->sa_family = AF_UNSPEC;
174 /* XXX Would MAXLINKHDR be better? */
175 hlen = sizeof(struct ether_header);
176 break;
177
178 case DLT_ARCNET:
179 sockp->sa_family = AF_UNSPEC;
180 hlen = ARC_HDRLEN;
181 break;
182
183 case DLT_FDDI:
184 sockp->sa_family = AF_UNSPEC;
185 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
186 hlen = 24;
187 break;
188
189 case DLT_NULL:
190 sockp->sa_family = AF_UNSPEC;
191 hlen = 0;
192 break;
193
194 default:
195 return (EIO);
196 }
197
198 len = uio->uio_resid;
199 if ((unsigned)len > MCLBYTES)
200 return (EIO);
201
202 MGETHDR(m, M_WAIT, MT_DATA);
203 if (m == 0)
204 return (ENOBUFS);
205 m->m_pkthdr.rcvif = 0;
206 m->m_pkthdr.len = len - hlen;
207
208 if (len > MHLEN) {
209 #if BSD >= 199103
210 MCLGET(m, M_WAIT);
211 if ((m->m_flags & M_EXT) == 0) {
212 #else
213 MCLGET(m);
214 if (m->m_len != MCLBYTES) {
215 #endif
216 error = ENOBUFS;
217 goto bad;
218 }
219 }
220 m->m_len = len;
221 *mp = m;
222 /*
223 * Make room for link header.
224 */
225 if (hlen != 0) {
226 m->m_len -= hlen;
227 #if BSD >= 199103
228 m->m_data += hlen; /* XXX */
229 #else
230 m->m_off += hlen;
231 #endif
232 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
233 if (error)
234 goto bad;
235 }
236 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
237 if (!error)
238 return (0);
239 bad:
240 m_freem(m);
241 return (error);
242 }
243
244 /*
245 * Attach file to the bpf interface, i.e. make d listen on bp.
246 * Must be called at splimp.
247 */
248 static void
249 bpf_attachd(d, bp)
250 struct bpf_d *d;
251 struct bpf_if *bp;
252 {
253 /*
254 * Point d at bp, and add d to the interface's list of listeners.
255 * Finally, point the driver's bpf cookie at the interface so
256 * it will divert packets to bpf.
257 */
258 d->bd_bif = bp;
259 d->bd_next = bp->bif_dlist;
260 bp->bif_dlist = d;
261
262 *bp->bif_driverp = bp;
263 }
264
265 /*
266 * Detach a file from its interface.
267 */
268 static void
269 bpf_detachd(d)
270 struct bpf_d *d;
271 {
272 struct bpf_d **p;
273 struct bpf_if *bp;
274
275 bp = d->bd_bif;
276 /*
277 * Check if this descriptor had requested promiscuous mode.
278 * If so, turn it off.
279 */
280 if (d->bd_promisc) {
281 int error;
282
283 d->bd_promisc = 0;
284 error = ifpromisc(bp->bif_ifp, 0);
285 if (error && error != EINVAL)
286 /*
287 * Something is really wrong if we were able to put
288 * the driver into promiscuous mode, but can't
289 * take it out.
290 */
291 panic("bpf: ifpromisc failed");
292 }
293 /* Remove d from the interface's descriptor list. */
294 p = &bp->bif_dlist;
295 while (*p != d) {
296 p = &(*p)->bd_next;
297 if (*p == 0)
298 panic("bpf_detachd: descriptor not in list");
299 }
300 *p = (*p)->bd_next;
301 if (bp->bif_dlist == 0)
302 /*
303 * Let the driver know that there are no more listeners.
304 */
305 *d->bd_bif->bif_driverp = 0;
306 d->bd_bif = 0;
307 }
308
309
310 /*
311 * Mark a descriptor free by making it point to itself.
312 * This is probably cheaper than marking with a constant since
313 * the address should be in a register anyway.
314 */
315 #define D_ISFREE(d) ((d) == (d)->bd_next)
316 #define D_MARKFREE(d) ((d)->bd_next = (d))
317 #define D_MARKUSED(d) ((d)->bd_next = 0)
318
319 /*
320 * Open the bpf device. Returns ENXIO for illegal minor device number,
321 * EBUSY if file is open by another process.
322 */
323 /* ARGSUSED */
324 int
325 bpfopen(dev, flag, mode, p)
326 dev_t dev;
327 int flag;
328 int mode;
329 struct proc *p;
330 {
331 register struct bpf_d *d;
332
333 if (minor(dev) >= NBPFILTER)
334 return (ENXIO);
335 /*
336 * Each minor can be opened by only one process. If the requested
337 * minor is in use, return EBUSY.
338 */
339 d = &bpf_dtab[minor(dev)];
340 if (!D_ISFREE(d))
341 return (EBUSY);
342
343 /* Mark "in use" and do most initialization. */
344 bzero((char *)d, sizeof(*d));
345 d->bd_bufsize = bpf_bufsize;
346 d->bd_sig = SIGIO;
347
348 return (0);
349 }
350
351 /*
352 * Close the descriptor by detaching it from its interface,
353 * deallocating its buffers, and marking it free.
354 */
355 /* ARGSUSED */
356 int
357 bpfclose(dev, flag, mode, p)
358 dev_t dev;
359 int flag;
360 int mode;
361 struct proc *p;
362 {
363 register struct bpf_d *d = &bpf_dtab[minor(dev)];
364 register int s;
365
366 s = splimp();
367 if (d->bd_bif)
368 bpf_detachd(d);
369 splx(s);
370 bpf_freed(d);
371
372 return (0);
373 }
374
375 /*
376 * Support for SunOS, which does not have tsleep.
377 */
378 #if BSD < 199103
379 static
380 bpf_timeout(arg)
381 caddr_t arg;
382 {
383 struct bpf_d *d = (struct bpf_d *)arg;
384 d->bd_timedout = 1;
385 wakeup(arg);
386 }
387
388 #define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)
389
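/*
 * Emulate a timed tsleep() on systems that only have sleep(): arm a
 * timeout that sets bd_timedout and wakes us, then sleep.  Returns 0 on
 * a normal wakeup, EWOULDBLOCK if the timeout fired, EINTR on a signal.
 */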
390 int
391 bpf_sleep(d)
392 register struct bpf_d *d;
393 {
394 register int rto = d->bd_rtout;
395 register int st;
396
397 if (rto != 0) {
398 d->bd_timedout = 0;
399 timeout(bpf_timeout, (caddr_t)d, rto);
400 }
401 st = sleep((caddr_t)d, PRINET|PCATCH);
402 if (rto != 0) {
403 if (d->bd_timedout == 0)
404 untimeout(bpf_timeout, (caddr_t)d);
405 else if (st == 0)
406 return EWOULDBLOCK;
407 }
408 return (st != 0) ? EINTR : 0;
409 }
410 #else
411 #define BPF_SLEEP tsleep
412 #endif
413
414 /*
415 * Rotate the packet buffers in descriptor d. Move the store buffer
416 * into the hold slot, and the free buffer into the store slot.
417 * Zero the length of the new store buffer.
418 */
419 #define ROTATE_BUFFERS(d) \
420 (d)->bd_hbuf = (d)->bd_sbuf; \
421 (d)->bd_hlen = (d)->bd_slen; \
422 (d)->bd_sbuf = (d)->bd_fbuf; \
423 (d)->bd_slen = 0; \
424 (d)->bd_fbuf = 0;
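/*
 * ROTATE_BUFFERS assumes the free buffer (bd_fbuf) is available; callers
 * either check this explicitly (catchpacket) or rely on the invariant
 * that an empty hold slot implies a valid free buffer (bpfread).
 */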
425 /*
426 * bpfread - read next chunk of packets from buffers
427 */
428 int
429 bpfread(dev, uio, ioflag)
430 dev_t dev;
431 register struct uio *uio;
432 int ioflag;
433 {
434 register struct bpf_d *d = &bpf_dtab[minor(dev)];
435 int error;
436 int s;
437
438 /*
439 * Restrict application to use a buffer the same size as
440 * the kernel buffers.
441 */
442 if (uio->uio_resid != d->bd_bufsize)
443 return (EINVAL);
444
445 s = splimp();
446 /*
447 * If the hold buffer is empty, then do a timed sleep, which
448 * ends when the timeout expires or when enough packets
449 * have arrived to fill the store buffer.
450 */
451 while (d->bd_hbuf == 0) {
452 if (d->bd_immediate && d->bd_slen != 0) {
453 /*
454 * One or more packets either arrived since the previous
455 * read or arrived while we were asleep.
456 * Rotate the buffers and return what's here.
457 */
458 ROTATE_BUFFERS(d);
459 break;
460 }
461 if (d->bd_rtout != -1)
462 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
463 d->bd_rtout);
464 else
465 error = EWOULDBLOCK; /* User requested non-blocking I/O */
466 if (error == EINTR || error == ERESTART) {
467 splx(s);
468 return (error);
469 }
470 if (error == EWOULDBLOCK) {
471 /*
472 * On a timeout, return what's in the buffer,
473 * which may be nothing. If there is something
474 * in the store buffer, we can rotate the buffers.
475 */
476 if (d->bd_hbuf)
477 /*
478 * We filled up the buffer in between
479 * getting the timeout and arriving
480 * here, so we don't need to rotate.
481 */
482 break;
483
484 if (d->bd_slen == 0) {
485 splx(s);
486 return (0);
487 }
488 ROTATE_BUFFERS(d);
489 break;
490 }
491 }
492 /*
493 * At this point, we know we have something in the hold slot.
494 */
495 splx(s);
496
497 /*
498 * Move data from hold buffer into user space.
499 * We know the entire buffer is transferred since
500 * we checked above that the read buffer is d->bd_bufsize bytes.
501 */
502 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
503
504 s = splimp();
505 d->bd_fbuf = d->bd_hbuf;
506 d->bd_hbuf = 0;
507 d->bd_hlen = 0;
508 splx(s);
509
510 return (error);
511 }
512
513
514 /*
515 * If there are processes sleeping on this descriptor, wake them up.
516 */
517 static __inline void
518 bpf_wakeup(d)
519 register struct bpf_d *d;
520 {
521 struct proc *p;
522
523 wakeup((caddr_t)d);
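/*
 * bd_pgid holds a process group if positive, the negative of a process
 * id otherwise (see the TIOCSPGRP note in bpfioctl below).
 */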
524 if (d->bd_async && d->bd_sig)
525 if (d->bd_pgid > 0)
526 gsignal (d->bd_pgid, d->bd_sig);
527 else if ((p = pfind (-d->bd_pgid)) != NULL)
528 psignal (p, d->bd_sig);
529
530 #if BSD >= 199103
531 selwakeup(&d->bd_sel);
532 /* XXX */
533 d->bd_sel.si_pid = 0;
534 #else
535 if (d->bd_selproc) {
536 selwakeup(d->bd_selproc, (int)d->bd_selcoll);
537 d->bd_selcoll = 0;
538 d->bd_selproc = 0;
539 }
540 #endif
541 }
542
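/*
 * bpfwrite - inject a packet, built from the user's data, onto the
 * interface this descriptor is attached to.
 */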
543 int
544 bpfwrite(dev, uio, ioflag)
545 dev_t dev;
546 struct uio *uio;
547 int ioflag;
548 {
549 register struct bpf_d *d = &bpf_dtab[minor(dev)];
550 struct ifnet *ifp;
551 struct mbuf *m;
552 int error, s;
553 static struct sockaddr dst;
554
555 if (d->bd_bif == 0)
556 return (ENXIO);
557
558 ifp = d->bd_bif->bif_ifp;
559
560 if (uio->uio_resid == 0)
561 return (0);
562
563 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst);
564 if (error)
565 return (error);
566
567 if (m->m_pkthdr.len > ifp->if_mtu) {
m_freem(m); /* don't leak the mbuf we just built */
568 return (EMSGSIZE);
}
569
570 s = splsoftnet();
571 #if BSD >= 199103
572 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
573 #else
574 error = (*ifp->if_output)(ifp, m, &dst);
575 #endif
576 splx(s);
577 /*
578 * The driver frees the mbuf.
579 */
580 return (error);
581 }
582
583 /*
584 * Reset a descriptor by flushing its packet buffer and clearing the
585 * receive and drop counts. Should be called at splimp.
586 */
587 static void
588 reset_d(d)
589 struct bpf_d *d;
590 {
591 if (d->bd_hbuf) {
592 /* Free the hold buffer. */
593 d->bd_fbuf = d->bd_hbuf;
594 d->bd_hbuf = 0;
595 }
596 d->bd_slen = 0;
597 d->bd_hlen = 0;
598 d->bd_rcount = 0;
599 d->bd_dcount = 0;
600 }
601
602 /*
603 * FIONREAD Check for read packet available.
604 * BIOCGBLEN Get buffer len [for read()].
605 * BIOCSETF Set link layer read filter.
606 * BIOCFLUSH Flush read packet buffer.
607 * BIOCPROMISC Put interface into promiscuous mode.
608 * BIOCGDLT Get link layer type.
609 * BIOCGETIF Get interface name.
610 * BIOCSETIF Set interface.
611 * BIOCSRTIMEOUT Set read timeout.
612 * BIOCGRTIMEOUT Get read timeout.
613 * BIOCGSTATS Get packet stats.
614 * BIOCIMMEDIATE Set immediate mode.
615 * BIOCVERSION Get filter language version.
616 */
617 /* ARGSUSED */
618 int
619 bpfioctl(dev, cmd, addr, flag, p)
620 dev_t dev;
621 u_long cmd;
622 caddr_t addr;
623 int flag;
624 struct proc *p;
625 {
626 register struct bpf_d *d = &bpf_dtab[minor(dev)];
627 int s, error = 0;
628
629 switch (cmd) {
630
631 default:
632 error = EINVAL;
633 break;
634
635 /*
636 * Check for read packet available.
637 */
638 case FIONREAD:
639 {
640 int n;
641
642 s = splimp();
643 n = d->bd_slen;
644 if (d->bd_hbuf)
645 n += d->bd_hlen;
646 splx(s);
647
648 *(int *)addr = n;
649 break;
650 }
651
652 /*
653 * Get buffer len [for read()].
654 */
655 case BIOCGBLEN:
656 *(u_int *)addr = d->bd_bufsize;
657 break;
658
659 /*
660 * Set buffer length.
661 */
662 case BIOCSBLEN:
663 #if BSD < 199103
664 error = EINVAL;
665 #else
666 if (d->bd_bif != 0)
667 error = EINVAL;
668 else {
669 register u_int size = *(u_int *)addr;
670
671 if (size > BPF_MAXBUFSIZE)
672 *(u_int *)addr = size = BPF_MAXBUFSIZE;
673 else if (size < BPF_MINBUFSIZE)
674 *(u_int *)addr = size = BPF_MINBUFSIZE;
675 d->bd_bufsize = size;
676 }
677 #endif
678 break;
679
680 /*
681 * Set link layer read filter.
682 */
683 case BIOCSETF:
684 error = bpf_setf(d, (struct bpf_program *)addr);
685 break;
686
687 /*
688 * Flush read packet buffer.
689 */
690 case BIOCFLUSH:
691 s = splimp();
692 reset_d(d);
693 splx(s);
694 break;
695
696 /*
697 * Put interface into promiscuous mode.
698 */
699 case BIOCPROMISC:
700 if (d->bd_bif == 0) {
701 /*
702 * No interface attached yet.
703 */
704 error = EINVAL;
705 break;
706 }
707 s = splimp();
708 if (d->bd_promisc == 0) {
709 error = ifpromisc(d->bd_bif->bif_ifp, 1);
710 if (error == 0)
711 d->bd_promisc = 1;
712 }
713 splx(s);
714 break;
715
716 /*
717 * Get device parameters.
718 */
719 case BIOCGDLT:
720 if (d->bd_bif == 0)
721 error = EINVAL;
722 else
723 *(u_int *)addr = d->bd_bif->bif_dlt;
724 break;
725
726 /*
727 * Get interface name.
728 */
729 case BIOCGETIF:
730 if (d->bd_bif == 0)
731 error = EINVAL;
732 else
733 bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
734 break;
735
736 /*
737 * Set interface.
738 */
739 case BIOCSETIF:
740 error = bpf_setif(d, (struct ifreq *)addr);
741 break;
742
743 /*
744 * Set read timeout.
745 */
746 case BIOCSRTIMEOUT:
747 {
748 struct timeval *tv = (struct timeval *)addr;
749
750 /* Compute number of ticks. */
751 d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
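/*
 * Note: a timeout of 0 makes reads block until a packet arrives;
 * FIONBIO (below) sets bd_rtout to -1 to request non-blocking reads.
 */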
752 break;
753 }
754
755 /*
756 * Get read timeout.
757 */
758 case BIOCGRTIMEOUT:
759 {
760 struct timeval *tv = (struct timeval *)addr;
761
762 tv->tv_sec = d->bd_rtout / hz;
763 tv->tv_usec = (d->bd_rtout % hz) * tick;
764 break;
765 }
766
767 /*
768 * Get packet stats.
769 */
770 case BIOCGSTATS:
771 {
772 struct bpf_stat *bs = (struct bpf_stat *)addr;
773
774 bs->bs_recv = d->bd_rcount;
775 bs->bs_drop = d->bd_dcount;
776 break;
777 }
778
779 /*
780 * Set immediate mode.
781 */
782 case BIOCIMMEDIATE:
783 d->bd_immediate = *(u_int *)addr;
784 break;
785
786 case BIOCVERSION:
787 {
788 struct bpf_version *bv = (struct bpf_version *)addr;
789
790 bv->bv_major = BPF_MAJOR_VERSION;
791 bv->bv_minor = BPF_MINOR_VERSION;
792 break;
793 }
794
795
796 case FIONBIO: /* Non-blocking I/O */
797 if (*(int *)addr)
798 d->bd_rtout = -1;
799 else
800 d->bd_rtout = 0;
801 break;
802
803 case FIOASYNC: /* Send signal on receive packets */
804 d->bd_async = *(int *)addr;
805 break;
806
807 /*
808 * N.B. ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing
809 * the equivalent of a TIOCSPGRP and hence end up here. *However*
810 * TIOCSPGRP's arg is a process group if it's positive and a process
811 * id if it's negative. This is exactly the opposite of what the
812 * other two functions want! Therefore there is code in ioctl and
813 * fcntl to negate the arg before calling here.
814 */
815 case TIOCSPGRP: /* Process or group to send signals to */
816 d->bd_pgid = *(int *)addr;
817 break;
818
819 case TIOCGPGRP:
820 *(int *)addr = d->bd_pgid;
821 break;
822
823 case BIOCSRSIG: /* Set receive signal */
824 {
825 u_int sig;
826
827 sig = *(u_int *)addr;
828
829 if (sig >= NSIG)
830 error = EINVAL;
831 else
832 d->bd_sig = sig;
833 break;
834 }
835 case BIOCGRSIG:
836 *(u_int *)addr = d->bd_sig;
837 break;
838 }
839 return (error);
840 }
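/*
 * For orientation, the ioctls above are normally driven from userland
 * roughly as in the sketch below.  This is not part of this file; the
 * "/dev/bpf0" node name and the "le0" interface name are examples only,
 * and all error checking is omitted.
 *
 *	#include <sys/types.h>
 *	#include <sys/time.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct ifreq ifr;
 *		u_int blen, imm = 1;
 *		char *buf;
 *		int fd = open("/dev/bpf0", O_RDWR);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "le0", sizeof(ifr.ifr_name));
 *		ioctl(fd, BIOCSETIF, &ifr);
 *		ioctl(fd, BIOCIMMEDIATE, &imm);
 *		ioctl(fd, BIOCGBLEN, &blen);
 *		buf = malloc(blen);
 *		(void)read(fd, buf, blen);
 *		return (0);
 *	}
 *
 * Each read() must pass a buffer of exactly the size reported by
 * BIOCGBLEN (bpfread enforces this), and it returns zero or more packets,
 * each preceded by a struct bpf_hdr and padded with BPF_WORDALIGN.
 */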
841
842 /*
843 * Set d's packet filter program to fp. If this file already has a filter,
844 * free it and replace it. Returns EINVAL for bogus requests.
845 */
846 int
847 bpf_setf(d, fp)
848 struct bpf_d *d;
849 struct bpf_program *fp;
850 {
851 struct bpf_insn *fcode, *old;
852 u_int flen, size;
853 int s;
854
855 old = d->bd_filter;
856 if (fp->bf_insns == 0) {
857 if (fp->bf_len != 0)
858 return (EINVAL);
859 s = splimp();
860 d->bd_filter = 0;
861 reset_d(d);
862 splx(s);
863 if (old != 0)
864 free((caddr_t)old, M_DEVBUF);
865 return (0);
866 }
867 flen = fp->bf_len;
868 if (flen > BPF_MAXINSNS)
869 return (EINVAL);
870
871 size = flen * sizeof(*fp->bf_insns);
872 fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
873 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
874 bpf_validate(fcode, (int)flen)) {
875 s = splimp();
876 d->bd_filter = fcode;
877 reset_d(d);
878 splx(s);
879 if (old != 0)
880 free((caddr_t)old, M_DEVBUF);
881
882 return (0);
883 }
884 free((caddr_t)fcode, M_DEVBUF);
885 return (EINVAL);
886 }
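/*
 * For reference, the filter program accepted above is simply an array of
 * struct bpf_insn plus an instruction count.  A minimal userland sketch
 * (the descriptor "fd" is assumed to be an open bpf device; this trivial
 * program accepts every packet in full):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1)
 *	};
 *	struct bpf_program prog = {
 *		sizeof(insns) / sizeof(insns[0]),
 *		insns
 *	};
 *
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * bpf_setf() above copies the instructions in with copyin(), checks them
 * with bpf_validate(), and only then installs them on the descriptor.
 */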
887
888 /*
889 * Detach a file from its current interface (if attached at all) and attach
890 * to the interface indicated by the name stored in ifr.
891 * Return an errno or 0.
892 */
893 static int
894 bpf_setif(d, ifr)
895 struct bpf_d *d;
896 struct ifreq *ifr;
897 {
898 struct bpf_if *bp;
899 char *cp;
900 int unit_seen, i, s, error;
901
902 /*
903 * Make sure the provided name has a unit number, and default
904 * it to '0' if not specified.
905 * XXX This is ugly ... do this differently?
906 */
907 unit_seen = 0;
908 cp = ifr->ifr_name;
909 cp[sizeof(ifr->ifr_name) - 1] = '\0'; /* sanity */
910 while (*cp++)
911 if (*cp >= '0' && *cp <= '9')
912 unit_seen = 1;
913 if (!unit_seen) {
914 /* Make sure to leave room for the '\0'. */
915 for (i = 0; i < (IFNAMSIZ - 1); ++i) {
916 if ((ifr->ifr_name[i] >= 'a' &&
917 ifr->ifr_name[i] <= 'z') ||
918 (ifr->ifr_name[i] >= 'A' &&
919 ifr->ifr_name[i] <= 'Z'))
920 continue;
921 ifr->ifr_name[i] = '0';
922 }
923 }
924
925 /*
926 * Look through attached interfaces for the named one.
927 */
928 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
929 struct ifnet *ifp = bp->bif_ifp;
930
931 if (ifp == 0 ||
932 strcmp(ifp->if_xname, ifr->ifr_name) != 0)
933 continue;
934 /*
935 * We found the requested interface.
936 * If it's not up, return an error.
937 * Allocate the packet buffers if we need to.
938 * If we're already attached to requested interface,
939 * just flush the buffer.
940 */
941 if ((ifp->if_flags & IFF_UP) == 0)
942 return (ENETDOWN);
943
944 if (d->bd_sbuf == 0) {
945 error = bpf_allocbufs(d);
946 if (error != 0)
947 return (error);
948 }
949 s = splimp();
950 if (bp != d->bd_bif) {
951 if (d->bd_bif)
952 /*
953 * Detach if attached to something else.
954 */
955 bpf_detachd(d);
956
957 bpf_attachd(d, bp);
958 }
959 reset_d(d);
960 splx(s);
961 return (0);
962 }
963 /* Not found. */
964 return (ENXIO);
965 }
966
967 /*
968 * Copy the interface name to the ifreq.
969 */
970 static void
971 bpf_ifname(ifp, ifr)
972 struct ifnet *ifp;
973 struct ifreq *ifr;
974 {
975
976 bcopy(ifp->if_xname, ifr->ifr_name, IFNAMSIZ);
977 }
978
979 /*
980 * The new select interface passes down the proc pointer; the old select
981 * stubs had to grab it out of the user struct. This glue allows either case.
982 */
983 #if BSD >= 199103
984 #define bpf_select bpfselect
985 #else
986 int
987 bpfselect(dev, rw)
988 register dev_t dev;
989 int rw;
990 {
991 return (bpf_select(dev, rw, u.u_procp));
992 }
993 #endif
994
995 /*
996 * Support for select() system call
997 *
998 * Return true iff the specific operation will not block indefinitely.
999 * Otherwise, return false but make a note that a selwakeup() must be done.
1000 */
1001 int
1002 bpf_select(dev, rw, p)
1003 register dev_t dev;
1004 int rw;
1005 struct proc *p;
1006 {
1007 register struct bpf_d *d;
1008 register int s;
1009
1010 if (rw != FREAD)
1011 return (0);
1012 /*
1013 * An imitation of the FIONREAD ioctl code.
1014 */
1015 d = &bpf_dtab[minor(dev)];
1016
1017 s = splimp();
1018 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
1019 /*
1020 * There is data waiting.
1021 */
1022 splx(s);
1023 return (1);
1024 }
1025 #if BSD >= 199103
1026 selrecord(p, &d->bd_sel);
1027 #else
1028 /*
1029 * No data ready. If there's already a select() waiting on this
1030 * minor device then this is a collision. This shouldn't happen
1031 * because minors really should not be shared, but if a process
1032 * forks while one of these is open, it is possible that both
1033 * processes could select on the same descriptor.
1034 */
1035 if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
1036 d->bd_selcoll = 1;
1037 else
1038 d->bd_selproc = p;
1039 #endif
1040 splx(s);
1041 return (0);
1042 }
1043
1044 /*
1045 * Incoming linkage from device drivers. Process the packet pkt, of length
1046 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1047 * by each process' filter, and if accepted, stashed into the corresponding
1048 * buffer.
1049 */
1050 void
1051 bpf_tap(arg, pkt, pktlen)
1052 caddr_t arg;
1053 register u_char *pkt;
1054 register u_int pktlen;
1055 {
1056 struct bpf_if *bp;
1057 register struct bpf_d *d;
1058 register size_t slen;
1059 /*
1060 * Note that the ipl does not have to be raised at this point.
1061 * The only problem that could arise here would be if two different
1062 * interfaces shared any data. This is not the case.
1063 */
1064 bp = (struct bpf_if *)arg;
1065 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1066 ++d->bd_rcount;
1067 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1068 if (slen != 0)
1069 catchpacket(d, pkt, pktlen, slen, bcopy);
1070 }
1071 }
1072
1073 /*
1074 * Copy data from an mbuf chain into a buffer. This code is derived
1075 * from m_copydata in sys/uipc_mbuf.c.
1076 */
1077 static void
1078 bpf_mcopy(src_arg, dst_arg, len)
1079 const void *src_arg;
1080 void *dst_arg;
1081 register size_t len;
1082 {
1083 register const struct mbuf *m;
1084 register u_int count;
1085 u_char *dst;
1086
1087 m = src_arg;
1088 dst = dst_arg;
1089 while (len > 0) {
1090 if (m == 0)
1091 panic("bpf_mcopy");
1092 count = min(m->m_len, len);
1093 bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
1094 m = m->m_next;
1095 dst += count;
1096 len -= count;
1097 }
1098 }
1099
1100 /*
1101 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1102 */
1103 void
1104 bpf_mtap(arg, m)
1105 caddr_t arg;
1106 struct mbuf *m;
1107 {
1108 struct bpf_if *bp = (struct bpf_if *)arg;
1109 struct bpf_d *d;
1110 size_t pktlen, slen;
1111 struct mbuf *m0;
1112
1113 pktlen = 0;
1114 for (m0 = m; m0 != 0; m0 = m0->m_next)
1115 pktlen += m0->m_len;
1116
1117 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1118 ++d->bd_rcount;
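/*
 * A buffer length of 0 tells bpf_filter() that the packet argument is
 * really an mbuf chain rather than a contiguous buffer.
 */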
1119 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1120 if (slen != 0)
1121 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1122 }
1123 }
1124
1125 /*
1126 * Move the packet data from interface memory (pkt) into the
1127 * store buffer. Wake up any pending reads when the store buffer fills
1128 * or when immediate mode is set. "cpfn" is the routine called to do
1129 * the actual data transfer. bcopy is passed in to copy contiguous chunks, while
1130 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1131 * pkt is really an mbuf.
1132 */
1133 static void
1134 catchpacket(d, pkt, pktlen, snaplen, cpfn)
1135 register struct bpf_d *d;
1136 register u_char *pkt;
1137 register size_t pktlen, snaplen;
1138 register void (*cpfn) __P((const void *, void *, size_t));
1139 {
1140 register struct bpf_hdr *hp;
1141 register int totlen, curlen;
1142 register int hdrlen = d->bd_bif->bif_hdrlen;
1143 /*
1144 * Figure out how many bytes to move. If the packet is
1145 * greater or equal to the snapshot length, transfer that
1146 * much. Otherwise, transfer the whole packet (unless
1147 * we hit the buffer size limit).
1148 */
1149 totlen = hdrlen + min(snaplen, pktlen);
1150 if (totlen > d->bd_bufsize)
1151 totlen = d->bd_bufsize;
1152
1153 /*
1154 * Round up the end of the previous packet to the next longword.
1155 */
1156 curlen = BPF_WORDALIGN(d->bd_slen);
1157 if (curlen + totlen > d->bd_bufsize) {
1158 /*
1159 * This packet will overflow the storage buffer.
1160 * Rotate the buffers if we can, then wakeup any
1161 * pending reads.
1162 */
1163 if (d->bd_fbuf == 0) {
1164 /*
1165 * We haven't completed the previous read yet,
1166 * so drop the packet.
1167 */
1168 ++d->bd_dcount;
1169 return;
1170 }
1171 ROTATE_BUFFERS(d);
1172 bpf_wakeup(d);
1173 curlen = 0;
1174 }
1175 else if (d->bd_immediate)
1176 /*
1177 * Immediate mode is set. A packet arrived so any
1178 * reads should be woken up.
1179 */
1180 bpf_wakeup(d);
1181
1182 /*
1183 * Append the bpf header.
1184 */
1185 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1186 #if BSD >= 199103
1187 microtime(&hp->bh_tstamp);
1188 #elif defined(sun)
1189 uniqtime(&hp->bh_tstamp);
1190 #else
1191 hp->bh_tstamp = time;
1192 #endif
1193 hp->bh_datalen = pktlen;
1194 hp->bh_hdrlen = hdrlen;
1195 /*
1196 * Copy the packet data into the store buffer and update its length.
1197 */
1198 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1199 d->bd_slen = curlen + totlen;
1200 }
1201
1202 /*
1203 * Allocate the packet buffers for a descriptor.
1204 */
1205 static int
1206 bpf_allocbufs(d)
1207 register struct bpf_d *d;
1208 {
1209 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1210 if (d->bd_fbuf == 0)
1211 return (ENOBUFS);
1212
1213 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
1214 if (d->bd_sbuf == 0) {
1215 free(d->bd_fbuf, M_DEVBUF);
1216 return (ENOBUFS);
1217 }
1218 d->bd_slen = 0;
1219 d->bd_hlen = 0;
1220 return (0);
1221 }
1222
1223 /*
1224 * Free buffers currently in use by a descriptor.
1225 * Called on close.
1226 */
1227 static void
1228 bpf_freed(d)
1229 register struct bpf_d *d;
1230 {
1231 /*
1232 * We don't need to lock out interrupts since this descriptor has
1233 * been detached from its interface and it hasn't yet been marked
1234 * free.
1235 */
1236 if (d->bd_sbuf != 0) {
1237 free(d->bd_sbuf, M_DEVBUF);
1238 if (d->bd_hbuf != 0)
1239 free(d->bd_hbuf, M_DEVBUF);
1240 if (d->bd_fbuf != 0)
1241 free(d->bd_fbuf, M_DEVBUF);
1242 }
1243 if (d->bd_filter)
1244 free((caddr_t)d->bd_filter, M_DEVBUF);
1245
1246 D_MARKFREE(d);
1247 }
1248
1249 /*
1250 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1251 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1252 * size of the link header (variable length headers not yet supported).
1253 */
1254 void
1255 bpfattach(driverp, ifp, dlt, hdrlen)
1256 caddr_t *driverp;
1257 struct ifnet *ifp;
1258 u_int dlt, hdrlen;
1259 {
1260 struct bpf_if *bp;
1261 int i;
1262 #if BSD < 199103
1263 static struct bpf_if bpf_ifs[NBPFILTER];
1264 static int bpfifno;
1265
1266 bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0;
1267 #else
1268 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
1269 #endif
1270 if (bp == 0)
1271 panic("bpfattach");
1272
1273 bp->bif_dlist = 0;
1274 bp->bif_driverp = (struct bpf_if **)driverp;
1275 bp->bif_ifp = ifp;
1276 bp->bif_dlt = dlt;
1277
1278 bp->bif_next = bpf_iflist;
1279 bpf_iflist = bp;
1280
1281 *bp->bif_driverp = 0;
1282
1283 /*
1284 * Compute the length of the bpf header. This is not necessarily
1285 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1286 * that the network layer header begins on a longword boundary (for
1287 * performance reasons and to alleviate alignment restrictions).
1288 */
1289 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1290
1291 /*
1292 * Mark all the descriptors free if this hasn't been done.
1293 */
1294 if (!D_ISFREE(&bpf_dtab[0]))
1295 for (i = 0; i < NBPFILTER; ++i)
1296 D_MARKFREE(&bpf_dtab[i]);
1297
1298 #if 0
1299 printf("bpf: %s attached\n", ifp->if_xname);
1300 #endif
1301 }
1302
1303 #if BSD >= 199103
1304 /* XXX This routine belongs in net/if.c. */
1305 /*
1306 * Set/clear promiscuous mode on interface ifp based on the truth value
1307 * of pswitch. The calls are reference counted so that only the first
1308 * "on" request actually has an effect, as does the final "off" request.
1309 * Results are undefined if the "off" and "on" requests are not matched.
1310 */
1311 int
1312 ifpromisc(ifp, pswitch)
1313 struct ifnet *ifp;
1314 int pswitch;
1315 {
1316 struct ifreq ifr;
1317
1318 if (pswitch) {
1319 /*
1320 * If the device is not configured up, we cannot put it in
1321 * promiscuous mode.
1322 */
1323 if ((ifp->if_flags & IFF_UP) == 0)
1324 return (ENETDOWN);
1325 if (ifp->if_pcount++ != 0)
1326 return (0);
1327 ifp->if_flags |= IFF_PROMISC;
1328 } else {
1329 if (--ifp->if_pcount > 0)
1330 return (0);
1331 ifp->if_flags &= ~IFF_PROMISC;
1332 /*
1333 * If the device is not configured up, we should not need to
1334 * turn off promiscuous mode (device should have turned it
1335 * off when interface went down; and will look at IFF_PROMISC
1336 * again next time interface comes up).
1337 */
1338 if ((ifp->if_flags & IFF_UP) == 0)
1339 return (0);
1340 }
1341 ifr.ifr_flags = ifp->if_flags;
1342 return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr));
1343 }
1344 #endif
1345
1346 #if BSD < 199103
1347 /*
1348 * Allocate some memory for bpf. This is temporary SunOS support, and
1349 * is admittedly a hack.
1350 * If resources are unavailable, return 0.
1351 */
1352 static caddr_t
1353 bpf_alloc(size, canwait)
1354 register int size;
1355 register int canwait;
1356 {
1357 register struct mbuf *m;
1358
1359 if ((unsigned)size > (MCLBYTES-8))
1360 return 0;
1361
1362 MGET(m, canwait, MT_DATA);
1363 if (m == 0)
1364 return 0;
1365 if ((unsigned)size > (MLEN-8)) {
1366 MCLGET(m);
1367 if (m->m_len != MCLBYTES) {
1368 m_freem(m);
1369 return 0;
1370 }
1371 }
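/*
 * Stash the mbuf pointer in the first 8 bytes of the data area (hence
 * the 8-byte offsets used here) so the owning mbuf can be recovered
 * later, e.g. when the buffer is freed; the caller gets the region
 * just past this hidden header.
 */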
1372 *mtod(m, struct mbuf **) = m;
1373 return mtod(m, caddr_t) + 8;
1374 }
1375 #endif
1376