/*	$NetBSD: if_tun.c,v 1.64 2003/09/21 19:17:14 jdolecek Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo (at) cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */
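
/*
 * Illustrative userland usage (a sketch only, not part of the driver):
 * a control process opens /dev/tunN, the interface is configured and
 * brought up (e.g. with ifconfig(8)), and the process then read(2)s
 * packets the stack sends over tunN and write(2)s packets to inject
 * back into the stack.  Roughly, assuming tun0 is free:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	char buf[2048];				(larger than the interface MTU)
 *	int fd = open("/dev/tun0", O_RDWR);	(requires superuser)
 *	...configure tun0 addresses from another process...
 *	ssize_t n = read(fd, buf, sizeof(buf));	(one packet per read)
 *	...inspect or rewrite the packet...
 *	(void)write(fd, buf, (size_t)n);	(one packet per write)
 *
 * Device naming and address setup are system configuration and are
 * only sketched here.
 */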

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.64 2003/09/21 19:17:14 jdolecek Exp $");

#include "tun.h"

#include "opt_inet.h"
#include "opt_ns.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/signalvar.h>
#include <sys/conf.h>

#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/netisr.h>
#include <net/route.h>


#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <sys/time.h>
#include <net/bpf.h>
#endif

#include <net/if_tun.h>

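/* Debugging printf, toggled at run time via the TUNSDEBUG ioctl. */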
#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;
void	tunattach __P((int));
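/* All tun units; the list itself is protected by tun_softc_lock. */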
LIST_HEAD(, tun_softc) tun_softc_list;
static struct simplelock tun_softc_lock;

int	tun_ioctl __P((struct ifnet *, u_long, caddr_t));
int	tun_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt));
int	tun_clone_create __P((struct if_clone *, int));
void	tun_clone_destroy __P((struct ifnet *));

struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0 __P((struct tun_softc *));
static void tuninit __P((struct tun_softc *));
#ifdef ALTQ
static void tunstart __P((struct ifnet *));
#endif
static struct tun_softc *tun_find_unit __P((dev_t));

dev_type_open(tunopen);
dev_type_close(tunclose);
dev_type_read(tunread);
dev_type_write(tunwrite);
dev_type_ioctl(tunioctl);
dev_type_poll(tunpoll);
dev_type_kqfilter(tunkqfilter);

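/* Character device entry points for /dev/tunN. */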
const struct cdevsw tun_cdevsw = {
	tunopen, tunclose, tunread, tunwrite, tunioctl,
	nostop, notty, tunpoll, nommap, tunkqfilter,
};

void
tunattach(unused)
	int unused;
{

	simple_lock_init(&tun_softc_lock);
	LIST_INIT(&tun_softc_list);
	if_clone_attach(&tun_cloner);
}

int
tun_clone_create(ifc, unit)
	struct if_clone *ifc;
	int unit;
{
	struct tun_softc *sc;

	sc = malloc(sizeof(struct tun_softc), M_DEVBUF, M_WAITOK);
	(void)memset(sc, 0, sizeof(struct tun_softc));

	(void)snprintf(sc->tun_if.if_xname, sizeof(sc->tun_if.if_xname),
	    "%s%d", ifc->ifc_name, unit);
	sc->tun_unit = unit;
	simple_lock_init(&sc->tun_lock);

	tunattach0(sc);

	simple_lock(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, sc, tun_list);
	simple_unlock(&tun_softc_lock);

	return (0);
}

void
tunattach0(sc)
	struct tun_softc *sc;
{
	struct ifnet *ifp = (void *)sc;

	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	ifp->if_softc = sc;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_ibytes = 0;
	ifp->if_obytes = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
#endif
}

void
tun_clone_destroy(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = (void *)ifp;

	simple_lock(&tun_softc_lock);
	simple_lock(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	simple_unlock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, POLL_HUP, 0, NULL);

	selwakeup(&tp->tun_rsel);

#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	free(tp, M_DEVBUF);
}

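/*
 * Look up the unit for the given device.  On success the softc is
 * returned with its tun_lock held; the caller must release it with
 * simple_unlock() when done.
 */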
static struct tun_softc *
tun_find_unit(dev)
	dev_t dev;
{
	struct tun_softc *tp;
	int unit = minor(dev);

	simple_lock(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		simple_lock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	return (tp);
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
int
tunopen(dev, flag, mode, p)
	dev_t dev;
	int flag, mode;
	struct proc *p;
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if (NTUN < 1)
		return (ENXIO);

	tp = tun_find_unit(dev);

	if (!tp) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
	}

	if (!tp)
		return (ENXIO);

	if (tp->tun_flags & TUN_OPEN) {
		simple_unlock(&tp->tun_lock);
		return (EBUSY);
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;
	struct ifnet *ifp;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" before the close */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	/*
	 * junk all pending output
	 */
	s = splnet();
	IFQ_PURGE(&ifp->if_snd);
	splx(s);

	if (ifp->if_flags & IFF_UP) {
		s = splnet();
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
#endif
			}
		}
		splx(s);
	}
	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

static void
tuninit(tp)
	struct tun_softc *tp;
{
	struct ifnet *ifp = &tp->tun_if;
	struct ifaddr *ifa;

	TUNDEBUG("%s: tuninit\n", ifp->if_xname);

	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			}
		}
#endif
	}

	return;
}

/*
 * Process an ioctl request.
 */
int
tun_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	int error = 0, s;
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);

	simple_lock(&tp->tun_lock);

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFDSTADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: destination address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr == 0) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	}
	case SIOCSIFFLAGS:
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
int
tun_output(ifp, m0, dst, rt)
	struct ifnet *ifp;
	struct mbuf *m0;
	struct sockaddr *dst;
	struct rtentry *rt;
{
	struct tun_softc *tp = ifp->if_softc;
#ifdef INET
	int s;
	int error;
#endif
	int mlen;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	simple_lock(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		m_freem (m0);
		simple_unlock(&tp->tun_lock);
		return (EHOSTDOWN);
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = dst->sa_family;

		m.m_flags = 0;
		m.m_next = m0;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	switch(dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				simple_unlock(&tp->tun_lock);
				return (ENOBUFS);
			}
			bcopy(dst, mtod(m0, char *), dst->sa_len);
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
		if (error) {
			splx(s);
			ifp->if_collisions++;
			simple_unlock(&tp->tun_lock);
			return (error);
		}
		mlen = m0->m_pkthdr.len;
		splx(s);
		ifp->if_opackets++;
		ifp->if_obytes += mlen;
		break;
#endif
	default:
		m_freem(m0);
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, POLL_IN, POLLIN|POLLRDNORM, NULL);

	selnotify(&tp->tun_rsel, 0);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;
	int error=0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			s = splnet();
			if (tp->tun_if.if_flags & IFF_UP) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EBUSY);
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			splx(s);
			break;
		default:
			simple_unlock(&tp->tun_lock);
			return (EINVAL);
		}
		break;

	case TUNSLMODE:
		if (*(int *)data)
			tp->tun_flags |= TUN_PREPADDR;
		else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		s = splnet();
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		splx(s);
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(p, &tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(p, tp->tun_pgid, cmd, data);
		break;

	default:
		simple_unlock(&tp->tun_lock);
		return (ENOTTY);
	}
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error=0, len, s, index;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	index = tp->tun_if.if_index;
	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		simple_unlock(&tp->tun_lock);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	s = splnet();
	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EWOULDBLOCK);
			}
			tp->tun_flags |= TUN_RWAIT;
			simple_unlock(&tp->tun_lock);
			if (tsleep((caddr_t)tp, PZERO|PCATCH, "tunread", 0)) {
				splx(s);
				return (EINTR);
			} else {
				/*
				 * Maybe the interface was destroyed while
				 * we were sleeping, so let's ensure that
				 * we're looking at the same (valid) tun
				 * interface before looping.
				 */
				tp = tun_find_unit(dev);
				if (tp == NULL ||
				    tp->tun_if.if_index != index) {
					splx(s);
					if (tp)
						simple_unlock(&tp->tun_lock);
					return (ENXIO);
				}
			}
		}
	} while (m0 == 0);
	splx(s);

	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
int
tunwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	struct ifqueue *ifq;
	struct sockaddr dst;
	int isr, error=0, s, tlen, mlen;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

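	/*
	 * In TUNSLMODE (TUN_PREPADDR) the writer prepends a struct
	 * sockaddr describing the destination; pull it off the user
	 * buffer first, discarding any bytes beyond sizeof(dst).
	 */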
	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			simple_unlock(&tp->tun_lock);
			return (EIO);
		}
		error = uiomove((caddr_t)&dst, sizeof(dst), uio);
		if (error) {
			simple_unlock(&tp->tun_lock);
			return (error);
		}
		if (dst.sa_len > sizeof(dst)) {
			/* Discard the part of the sockaddr we cannot store. */
			char discard;
			int n = dst.sa_len - sizeof(dst);
			while (n--)
				if ((error = uiomove(&discard, 1, uio)) != 0) {
					simple_unlock(&tp->tun_lock);
					return (error);
				}
		}
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		simple_unlock(&tp->tun_lock);
		return (EIO);
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
	default:
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	mlen = MHLEN;

	top = 0;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod (m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET (m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem (top);
		ifp->if_ierrors++;
		simple_unlock(&tp->tun_lock);
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = AF_INET;

		m.m_flags = 0;
		m.m_next = top;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	s = splnet();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		splx(s);
		ifp->if_collisions++;
		m_freem(top);
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, top);
	splx(s);
	ifp->if_ipackets++;
	ifp->if_ibytes += tlen;
	schednetisr(isr);
	simple_unlock(&tp->tun_lock);
	return (error);
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * when the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;
	struct proc *p;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	IFQ_POLL(&ifp->if_snd, m);
	if (m != NULL) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			fownsignal(tp->tun_pgid, POLL_OUT, POLLOUT|POLLWRNORM,
			    NULL);

		selwakeup(&tp->tun_rsel);
	}
}
#endif /* ALTQ */

/*
 * tunpoll - the poll interface; this is really only useful for reads.
 * The write detect always returns true: a write never blocks, it
 * either accepts the packet or drops it.
 */
int
tunpoll(dev, events, p)
	dev_t dev;
	int events;
	struct proc *p;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s, revents = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	s = splnet();
	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(p, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	splx(s);
	simple_unlock(&tp->tun_lock);
	return (revents);
}

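/*
 * Remove a knote from the read selinfo's klist when the kevent is
 * detached.
 */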
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;
	int s;

	s = splnet();
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

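/*
 * A read kevent is ready when the send queue is non-empty; report the
 * length of the first queued packet as the readable byte count.
 */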
static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int s;

	s = splnet();
	IF_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return (0);
	}

	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	splx(s);
	return (1);
}

static const struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread };

static const struct filterops tun_seltrue_filtops =
	{ 1, NULL, filt_tunrdetach, filt_seltrue };

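/*
 * Register a kevent.  EVFILT_READ fires when a packet is queued for
 * the reader; EVFILT_WRITE always reports ready, matching tunpoll.
 */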
int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp = tun_find_unit(dev);
	struct klist *klist;
	int s;

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (1);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		simple_unlock(&tp->tun_lock);
		return (1);
	}

	kn->kn_hook = tp;

	s = splnet();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);
	simple_unlock(&tp->tun_lock);

	return (0);
}