/*	$NetBSD: if_tun.c,v 1.68 2004/03/01 13:54:02 tron Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo (at) cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.68 2004/03/01 13:54:02 tron Exp $");

#include "tun.h"

#include "opt_inet.h"
#include "opt_ns.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/signalvar.h>
#include <sys/conf.h>

#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/netisr.h>
#include <net/route.h>


#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <sys/time.h>
#include <net/bpf.h>
#endif

#include <net/if_tun.h>

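/*
 * Debug output is compiled in unconditionally; it is only emitted when
 * the run-time tundebug flag is non-zero (the flag can be changed from
 * user space with the TUNSDEBUG/TUNGDEBUG ioctls handled in tunioctl()).
 */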
#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;
void	tunattach __P((int));
LIST_HEAD(, tun_softc) tun_softc_list;
static struct simplelock tun_softc_lock;

int	tun_ioctl __P((struct ifnet *, u_long, caddr_t));
int	tun_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt));
int	tun_clone_create __P((struct if_clone *, int));
void	tun_clone_destroy __P((struct ifnet *));

struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0 __P((struct tun_softc *));
static void tuninit __P((struct tun_softc *));
#ifdef ALTQ
static void tunstart __P((struct ifnet *));
#endif
static struct tun_softc *tun_find_unit __P((dev_t));

dev_type_open(tunopen);
dev_type_close(tunclose);
dev_type_read(tunread);
dev_type_write(tunwrite);
dev_type_ioctl(tunioctl);
dev_type_poll(tunpoll);
dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	tunopen, tunclose, tunread, tunwrite, tunioctl,
	nostop, notty, tunpoll, nommap, tunkqfilter,
};

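/*
 * Pseudo-device attach routine: initialise the global softc list and
 * its lock and register the "tun" interface cloner.
 */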
void
tunattach(unused)
	int unused;
{

	simple_lock_init(&tun_softc_lock);
	LIST_INIT(&tun_softc_list);
	if_clone_attach(&tun_cloner);
}

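/*
 * Cloner create routine: allocate and initialise a softc for the
 * requested unit, attach the interface and put the softc on the
 * global list.
 */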
int
tun_clone_create(ifc, unit)
	struct if_clone *ifc;
	int unit;
{
	struct tun_softc *sc;

	sc = malloc(sizeof(struct tun_softc), M_DEVBUF, M_WAITOK);
	(void)memset(sc, 0, sizeof(struct tun_softc));

	(void)snprintf(sc->tun_if.if_xname, sizeof(sc->tun_if.if_xname),
	    "%s%d", ifc->ifc_name, unit);
	sc->tun_unit = unit;
	simple_lock_init(&sc->tun_lock);

	tunattach0(sc);

	simple_lock(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, sc, tun_list);
	simple_unlock(&tun_softc_lock);

	return (0);
}

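/*
 * Common interface setup: fill in the struct ifnet embedded in the
 * softc, attach it to the network stack and hook it up to bpf.
 */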
void
tunattach0(sc)
	struct tun_softc *sc;
{
	struct ifnet *ifp = (void *)sc;

	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	ifp->if_softc = sc;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_ibytes = 0;
	ifp->if_obytes = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
#endif
}

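/*
 * Cloner destroy routine: take the softc off the global list, wake up
 * anyone sleeping or selecting on it, and detach the interface.
 */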
void
tun_clone_destroy(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = (void *)ifp;

	simple_lock(&tun_softc_lock);
	simple_lock(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	simple_unlock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	selwakeup(&tp->tun_rsel);

#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	free(tp, M_DEVBUF);
}

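/*
 * Look up the softc for a minor device number.  On success the softc
 * is returned with its tun_lock held; the caller must release it.
 */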
static struct tun_softc *
tun_find_unit(dev)
	dev_t dev;
{
	struct tun_softc *tp;
	int unit = minor(dev);

	simple_lock(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		simple_lock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	return (tp);
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
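/*
 * Illustrative sketch (not part of the driver): a user process would
 * typically open the character device, configure the interface, and
 * then read packets from it, roughly along these lines ("/dev/tun0"
 * and the error handling are assumptions, not taken from this file):
 *
 *	int fd = open("/dev/tun0", O_RDWR);	// requires superuser
 *	if (fd == -1)
 *		err(1, "open");
 *	// bring tun0 up and assign addresses with ifconfig(8), then:
 *	char buf[TUNMTU];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// one packet per read
 *
 * Each read() returns at most one queued packet, as implemented in
 * tunread() below.
 */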
int
tunopen(dev, flag, mode, p)
	dev_t dev;
	int flag, mode;
	struct proc *p;
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if (NTUN < 1)
		return (ENXIO);

	tp = tun_find_unit(dev);

	if (!tp) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
	}

	if (!tp)
		return (ENXIO);

	if (tp->tun_flags & TUN_OPEN) {
		simple_unlock(&tp->tun_lock);
		return (EBUSY);
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;
	struct ifnet *ifp;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" before the close */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	/*
	 * junk all pending output
	 */
	s = splnet();
	IFQ_PURGE(&ifp->if_snd);
	splx(s);

	if (ifp->if_flags & IFF_UP) {
		s = splnet();
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST : 0);
				}
#endif
			}
		}
		splx(s);
	}
	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

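/*
 * Mark the interface up and running and recompute the TUN_IASET and
 * TUN_DSTADDR flags from the interface's current (IPv4) addresses.
 */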
static void
tuninit(tp)
	struct tun_softc *tp;
{
	struct ifnet *ifp = &tp->tun_if;
	struct ifaddr *ifa;

	TUNDEBUG("%s: tuninit\n", ifp->if_xname);

	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			}
		}
#endif
	}

	return;
}

/*
 * Process an ioctl request.
 */
int
tun_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	int error = 0, s;
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);

	simple_lock(&tp->tun_lock);

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFDSTADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: destination address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	}
	case SIOCSIFFLAGS:
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
int
tun_output(ifp, m0, dst, rt)
	struct ifnet *ifp;
	struct mbuf *m0;
	struct sockaddr *dst;
	struct rtentry *rt;
{
	struct tun_softc *tp = ifp->if_softc;
#ifdef INET
	int s;
	int error;
#endif
	int mlen;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	simple_lock(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		m_freem (m0);
		simple_unlock(&tp->tun_lock);
		return (EHOSTDOWN);
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = dst->sa_family;

		m.m_flags = 0;
		m.m_next = m0;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	switch(dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				simple_unlock(&tp->tun_lock);
				return (ENOBUFS);
			}
			bcopy(dst, mtod(m0, char *), dst->sa_len);
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
		if (error) {
			splx(s);
			ifp->if_collisions++;
			simple_unlock(&tp->tun_lock);
			return (error);
		}
		mlen = m0->m_pkthdr.len;
		splx(s);
		ifp->if_opackets++;
		ifp->if_obytes += mlen;
		break;
#endif
	default:
		m_freem(m0);
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
		    NULL);

	selnotify(&tp->tun_rsel, 0);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
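/*
 * Illustrative sketch (not part of the driver): from user space the
 * per-device controls below are reached with ioctl(2) on the open
 * descriptor, e.g. (error handling omitted, everything except the
 * ioctl commands themselves is an assumption):
 *
 *	int on = 1;
 *	ioctl(fd, FIONBIO, &on);	// non-blocking reads (TUN_NBIO)
 *	ioctl(fd, TUNSLMODE, &on);	// prepend a sockaddr to packets
 *
 * The TUN-specific commands are defined in <net/if_tun.h>.
 */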
int
tunioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;
	int error=0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			s = splnet();
			if (tp->tun_if.if_flags & IFF_UP) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EBUSY);
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			splx(s);
			break;
		default:
			simple_unlock(&tp->tun_lock);
			return (EINVAL);
		}
		break;

	case TUNSLMODE:
		if (*(int *)data)
			tp->tun_flags |= TUN_PREPADDR;
		else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		s = splnet();
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		splx(s);
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(p, &tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(p, tp->tun_pgid, cmd, data);
		break;

	default:
		simple_unlock(&tp->tun_lock);
		return (ENOTTY);
	}
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error=0, len, s, index;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	index = tp->tun_if.if_index;
	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		simple_unlock(&tp->tun_lock);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	s = splnet();
	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EWOULDBLOCK);
			}
			tp->tun_flags |= TUN_RWAIT;
			simple_unlock(&tp->tun_lock);
			if (tsleep((caddr_t)tp, PZERO|PCATCH, "tunread", 0)) {
				splx(s);
				return (EINTR);
			} else {
				/*
				 * Maybe the interface was destroyed while
				 * we were sleeping, so let's ensure that
				 * we're looking at the same (valid) tun
				 * interface before looping.
				 */
				tp = tun_find_unit(dev);
				if (tp == NULL ||
				    tp->tun_if.if_index != index) {
					splx(s);
					if (tp)
						simple_unlock(&tp->tun_lock);
					return (ENXIO);
				}
			}
		}
	} while (m0 == 0);
	splx(s);

	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
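/*
 * When TUN_PREPADDR is set (see TUNSLMODE above) the packet written by
 * the user must be preceded by a struct sockaddr giving the destination
 * address family; otherwise the data is taken to be a bare IPv4 packet.
 */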
int
tunwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	struct ifqueue *ifq;
	struct sockaddr dst;
	int isr, error=0, s, tlen, mlen;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			simple_unlock(&tp->tun_lock);
			return (EIO);
		}
		error = uiomove((caddr_t)&dst, sizeof(dst), uio);
		if (error) {
			simple_unlock(&tp->tun_lock);
			return (error);
		}
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			char discard;
			int n = dst.sa_len - sizeof(dst);
			while (n--)
				if ((error = uiomove(&discard, 1, uio)) != 0) {
					simple_unlock(&tp->tun_lock);
					return (error);
				}
		}
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		simple_unlock(&tp->tun_lock);
		return (EIO);
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
	default:
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top != NULL)
			m_freem (top);
		ifp->if_ierrors++;
		simple_unlock(&tp->tun_lock);
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = AF_INET;

		m.m_flags = 0;
		m.m_next = top;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	s = splnet();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		splx(s);
		ifp->if_collisions++;
		m_freem(top);
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, top);
	splx(s);
	ifp->if_ipackets++;
	ifp->if_ibytes += tlen;
	schednetisr(isr);
	simple_unlock(&tp->tun_lock);
	return (error);
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.  When the interface
 * queue is rate-limited by ALTQ or TBR, if_start is needed so that
 * readers are woken up (and can drain the queue) when outgoing
 * packets become ready.
 */
static void
tunstart(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	IFQ_POLL(&ifp->if_snd, m);
	if (m != NULL) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			fownsignal(tp->tun_pgid, SIGIO, POLL_OUT,
			    POLLOUT|POLLWRNORM, NULL);

		selwakeup(&tp->tun_rsel);
	}
}
#endif /* ALTQ */

/*
 * tunpoll - the poll interface; this is really only useful for reads.
 * The write-detect case always returns true: a write never blocks, the
 * packet is either accepted or dropped.
 */
int
tunpoll(dev, events, p)
	dev_t dev;
	int events;
	struct proc *p;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s, revents = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	s = splnet();
	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(p, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	splx(s);
	simple_unlock(&tp->tun_lock);
	return (revents);
}

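/*
 * kqueue detach: remove the knote from the interface's read klist.
 */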
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;
	int s;

	s = splnet();
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

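/*
 * kqueue read filter: return true, with kn_data set to the length of
 * the packet at the head of the send queue, when a packet is waiting
 * to be read.
 */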
static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int s;

	s = splnet();
	IF_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return (0);
	}

	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	splx(s);
	return (1);
}

static const struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread };

static const struct filterops tun_seltrue_filtops =
	{ 1, NULL, filt_tunrdetach, filt_seltrue };

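/*
 * kqueue attach: hook a read or (always-true) write filter onto the
 * interface's read klist.
 */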
int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp = tun_find_unit(dev);
	struct klist *klist;
	int s;

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		simple_unlock(&tp->tun_lock);
		return (1);
	}

	kn->kn_hook = tp;

	s = splnet();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);
	simple_unlock(&tp->tun_lock);

	return (0);
}