/*	$NetBSD: if_tun.c,v 1.51.4.1 2002/05/16 03:57:33 gehenna Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo (at) cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with.  This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL.  This driver is based much more on read/write/poll mode of
 * operation though.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.51.4.1 2002/05/16 03:57:33 gehenna Exp $");

#include "tun.h"

#include "opt_inet.h"
#include "opt_ns.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/signalvar.h>
#include <sys/conf.h>

#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/netisr.h>
#include <net/route.h>


#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <sys/time.h>
#include <net/bpf.h>
#endif

#include <net/if_tun.h>

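/*
 * Debug printf gate: TUNDEBUG() statements are compiled in
 * unconditionally but only print while the global "tundebug" flag is
 * nonzero.  The flag can be read and set from userland with the
 * TUNGDEBUG/TUNSDEBUG ioctls handled in tunioctl() below.
 */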
#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;
void	tunattach __P((int));
LIST_HEAD(, tun_softc) tun_softc_list;
static struct simplelock tun_softc_lock;

int	tun_ioctl __P((struct ifnet *, u_long, caddr_t));
int	tun_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt));
int	tun_clone_create __P((struct if_clone *, int));
void	tun_clone_destroy __P((struct ifnet *));

struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0 __P((struct tun_softc *));
static void tuninit __P((struct tun_softc *));
#ifdef ALTQ
static void tunstart __P((struct ifnet *));
#endif
static struct tun_softc *tun_find_unit __P((dev_t));

dev_type_open(tunopen);
dev_type_close(tunclose);
dev_type_read(tunread);
dev_type_write(tunwrite);
dev_type_ioctl(tunioctl);
dev_type_poll(tunpoll);

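/*
 * Character-device switch for /dev/tunN.  Packets queued by the stack
 * through tun_output() are handed to the controlling user process via
 * these read/write/ioctl/poll entry points; nostop, notty and nommap
 * mark the operations this driver does not support.
 */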
const struct cdevsw tun_cdevsw = {
	tunopen, tunclose, tunread, tunwrite, tunioctl,
	nostop, notty, tunpoll, nommap,
};

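/*
 * Pseudo-device attach routine: initialize the softc list and its lock,
 * then register the "tun" cloner so units can be created and destroyed
 * on demand (e.g. via ifconfig create/destroy).
 */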
void
tunattach(unused)
	int unused;
{

	simple_lock_init(&tun_softc_lock);
	LIST_INIT(&tun_softc_list);
	if_clone_attach(&tun_cloner);
}

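/*
 * Cloner create hook: allocate and zero a softc, name the interface
 * after the cloner ("tun<unit>"), initialize the per-unit lock, attach
 * the network interface, and link the softc onto tun_softc_list.
 */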
int
tun_clone_create(ifc, unit)
	struct if_clone *ifc;
	int unit;
{
	struct tun_softc *sc;

	sc = malloc(sizeof(struct tun_softc), M_DEVBUF, M_WAITOK);
	(void)memset(sc, 0, sizeof(struct tun_softc));

	(void)snprintf(sc->tun_if.if_xname, sizeof(sc->tun_if.if_xname),
	    "%s%d", ifc->ifc_name, unit);
	sc->tun_unit = unit;
	simple_lock_init(&sc->tun_lock);

	tunattach0(sc);

	simple_lock(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, sc, tun_list);
	simple_unlock(&tun_softc_lock);

	return (0);
}

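/*
 * Common interface setup: fill in the ifnet callbacks, MTU, flags and
 * counters, attach the interface to the stack, and register it with
 * bpf using the DLT_NULL (address-family header) link type.
 */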
void
tunattach0(sc)
	struct tun_softc *sc;
{
	struct ifnet *ifp = (void *)sc;

	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	ifp->if_softc = sc;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
#endif
}

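/*
 * Cloner destroy hook: unlink the softc from the list, then wake any
 * sleeping reader, deliver SIGIO to an async owner, and notify
 * poll/select waiters before detaching the interface and freeing the
 * softc, so nobody is left blocked on a vanishing unit.
 */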
void
tun_clone_destroy(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = (void *)ifp;
	struct proc *p;

	simple_lock(&tun_softc_lock);
	simple_lock(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	simple_unlock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgrp) {
		if (tp->tun_pgrp > 0)
			gsignal(tp->tun_pgrp, SIGIO);
		else if ((p = pfind(-tp->tun_pgrp)) != NULL)
			psignal(p, SIGIO);
	}
	selwakeup(&tp->tun_rsel);

#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	free(tp, M_DEVBUF);
}

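/*
 * Map a device minor number to its softc.  On success the softc is
 * returned with tp->tun_lock held; every caller is responsible for
 * dropping that lock on all of its exit paths.
 */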
static struct tun_softc *
tun_find_unit(dev)
	dev_t dev;
{
	struct tun_softc *tp;
	int unit = minor(dev);

	simple_lock(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		simple_lock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	return (tp);
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
int
tunopen(dev, flag, mode, p)
	dev_t dev;
	int flag, mode;
	struct proc *p;
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if (NTUN < 1)
		return (ENXIO);

	tp = tun_find_unit(dev);

	if (!tp)
		return (ENXIO);

	if (tp->tun_flags & TUN_OPEN) {
		simple_unlock(&tp->tun_lock);
		return (EBUSY);
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;
	struct ifnet *ifp;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" before the close */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	/*
	 * junk all pending output
	 */
	s = splnet();
	IFQ_PURGE(&ifp->if_snd);
	splx(s);

	if (ifp->if_flags & IFF_UP) {
		s = splnet();
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
#endif
			}
		}
		splx(s);
	}
	tp->tun_pgrp = 0;
	selwakeup(&tp->tun_rsel);

	TUNDEBUG("%s: closed\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

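/*
 * Mark the interface up and running, then rescan the address list to
 * record whether an IPv4 address (TUN_IASET) and, for point-to-point
 * mode, a destination address (TUN_DSTADDR) have been configured.
 */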
static void
tuninit(tp)
	struct tun_softc *tp;
{
	struct ifnet *ifp = &tp->tun_if;
	struct ifaddr *ifa;

	TUNDEBUG("%s: tuninit\n", ifp->if_xname);

	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			}
		}
#endif
	}

	return;
}

/*
 * Process an ioctl request.
 */
int
tun_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	int error = 0, s;
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);

	simple_lock(&tp->tun_lock);

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFDSTADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: destination address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU: {
		struct ifreq *ifr = (struct ifreq *)data;
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		struct ifreq *ifr = (struct ifreq *)data;
		if (ifr == 0) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	}
	case SIOCSIFFLAGS:
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
int
tun_output(ifp, m0, dst, rt)
	struct ifnet *ifp;
	struct mbuf *m0;
	struct sockaddr *dst;
	struct rtentry *rt;
{
	struct tun_softc *tp = ifp->if_softc;
	struct proc *p;
#ifdef INET
	int s;
	int error;
#endif
	ALTQ_DECL(struct altq_pktattr pktattr;)

	simple_lock(&tp->tun_lock);
	TUNDEBUG("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		m_freem(m0);
		simple_unlock(&tp->tun_lock);
		return (EHOSTDOWN);
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = dst->sa_family;

		m.m_next = m0;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				simple_unlock(&tp->tun_lock);
				return (ENOBUFS);
			}
			bcopy(dst, mtod(m0, char *), dst->sa_len);
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
		if (error) {
			splx(s);
			ifp->if_collisions++;
			simple_unlock(&tp->tun_lock);
			return (error);
		}
		splx(s);
		ifp->if_opackets++;
		break;
#endif
	default:
		m_freem(m0);
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgrp) {
		if (tp->tun_pgrp > 0)
			gsignal(tp->tun_pgrp, SIGIO);
		else if ((p = pfind(-tp->tun_pgrp)) != NULL)
			psignal(p, SIGIO);
	}
	selwakeup(&tp->tun_rsel);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			s = splnet();
			if (tp->tun_if.if_flags & IFF_UP) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EBUSY);
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			splx(s);
			break;
		default:
			simple_unlock(&tp->tun_lock);
			return (EINVAL);
			break;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data)
			tp->tun_flags |= TUN_PREPADDR;
		else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		s = splnet();
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		splx(s);
		break;

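	/*
	 * Async I/O ownership: a positive value names a process group
	 * (signalled with gsignal()), a negative value is the negation
	 * of a single pid (looked up with pfind() and signalled with
	 * psignal()); see the SIGIO delivery in tun_output() and
	 * tun_clone_destroy().
	 */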
	case TIOCSPGRP:
		tp->tun_pgrp = *(int *)data;
		break;

	case TIOCGPGRP:
		*(int *)data = tp->tun_pgrp;
		break;

	default:
		simple_unlock(&tp->tun_lock);
		return (ENOTTY);
	}
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error = 0, len, s, index;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	index = tp->tun_if.if_index;
	ifp = &tp->tun_if;

	TUNDEBUG("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		simple_unlock(&tp->tun_lock);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	s = splnet();
	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EWOULDBLOCK);
			}
			tp->tun_flags |= TUN_RWAIT;
			simple_unlock(&tp->tun_lock);
			if (tsleep((caddr_t)tp, PZERO|PCATCH, "tunread", 0)) {
				splx(s);
				return (EINTR);
			} else {
				/*
				 * Maybe the interface was destroyed while
				 * we were sleeping, so let's ensure that
				 * we're looking at the same (valid) tun
				 * interface before looping.
				 */
				tp = tun_find_unit(dev);
				if (tp == NULL ||
				    tp->tun_if.if_index != index) {
					splx(s);
					if (tp)
						simple_unlock(&tp->tun_lock);
					return (ENXIO);
				}
			}
		}
	} while (m0 == 0);
	splx(s);

	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
int
tunwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	struct ifqueue *ifq;
	struct sockaddr dst;
	int isr, error = 0, s, tlen, mlen;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

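	/*
	 * In TUNSLMODE (TUN_PREPADDR) each packet written by the user
	 * must be preceded by a struct sockaddr naming its destination;
	 * read it here, discarding any part of an oversized address
	 * that does not fit in "dst".  Otherwise assume IPv4.
	 */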
	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			simple_unlock(&tp->tun_lock);
			return (EIO);
		}
		error = uiomove((caddr_t)&dst, sizeof(dst), uio);
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			char discard;
			int n = dst.sa_len - sizeof(dst);
			while (n--)
				if ((error = uiomove(&discard, 1, uio)) != 0) {
					simple_unlock(&tp->tun_lock);
					return (error);
				}
		}
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid < 0 || uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		simple_unlock(&tp->tun_lock);
		return (EIO);
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
	default:
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	mlen = MHLEN;

	top = 0;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem(top);
		ifp->if_ierrors++;
		simple_unlock(&tp->tun_lock);
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = AF_INET;

		m.m_next = top;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	s = splnet();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		splx(s);
		ifp->if_collisions++;
		m_freem(top);
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, top);
	splx(s);
	ifp->if_ipackets++;
	schednetisr(isr);
	simple_unlock(&tp->tun_lock);
	return (error);
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * When the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;
	struct proc *p;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	IFQ_POLL(&ifp->if_snd, m);
	if (m != NULL) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgrp) {
			if (tp->tun_pgrp > 0)
				gsignal(tp->tun_pgrp, SIGIO);
			else if ((p = pfind(-tp->tun_pgrp)) != NULL)
				psignal(p, SIGIO);
		}
		selwakeup(&tp->tun_rsel);
	}
}
#endif /* ALTQ */
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
int
tunpoll(dev, events, p)
	dev_t dev;
	int events;
	struct proc *p;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s, revents = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	s = splnet();
	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(p, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	splx(s);
	simple_unlock(&tp->tun_lock);
	return (revents);
}