/*	$NetBSD: if_tun.c,v 1.57 2002/11/26 18:51:19 christos Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.57 2002/11/26 18:51:19 christos Exp $");

#include "tun.h"

#include "opt_inet.h"
#include "opt_ns.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/signalvar.h>
#include <sys/conf.h>

#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/netisr.h>
#include <net/route.h>


#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <sys/time.h>
#include <net/bpf.h>
#endif

#include <net/if_tun.h>

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;
void	tunattach __P((int));
LIST_HEAD(, tun_softc) tun_softc_list;
static struct simplelock tun_softc_lock;

int	tun_ioctl __P((struct ifnet *, u_long, caddr_t));
int	tun_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt));
int	tun_clone_create __P((struct if_clone *, int));
void	tun_clone_destroy __P((struct ifnet *));

struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0 __P((struct tun_softc *));
static void tuninit __P((struct tun_softc *));
#ifdef ALTQ
static void tunstart __P((struct ifnet *));
#endif
static struct tun_softc *tun_find_unit __P((dev_t));

dev_type_open(tunopen);
dev_type_close(tunclose);
dev_type_read(tunread);
dev_type_write(tunwrite);
dev_type_ioctl(tunioctl);
dev_type_poll(tunpoll);
dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	tunopen, tunclose, tunread, tunwrite, tunioctl,
	nostop, notty, tunpoll, nommap, tunkqfilter,
};

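/*
 * Pseudo-device attach routine: set up the softc list and its lock
 * and register the "tun" interface cloner.
 */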
void
tunattach(unused)
	int unused;
{

	simple_lock_init(&tun_softc_lock);
	LIST_INIT(&tun_softc_list);
	if_clone_attach(&tun_cloner);
}

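/*
 * Create and attach a new tun instance for the requested unit and
 * insert it on the global list.
 */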
int
tun_clone_create(ifc, unit)
	struct if_clone *ifc;
	int unit;
{
	struct tun_softc *sc;

	sc = malloc(sizeof(struct tun_softc), M_DEVBUF, M_WAITOK);
	(void)memset(sc, 0, sizeof(struct tun_softc));

	(void)snprintf(sc->tun_if.if_xname, sizeof(sc->tun_if.if_xname),
	    "%s%d", ifc->ifc_name, unit);
	sc->tun_unit = unit;
	simple_lock_init(&sc->tun_lock);

	tunattach0(sc);

	simple_lock(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, sc, tun_list);
	simple_unlock(&tun_softc_lock);

	return (0);
}

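/*
 * Common attach code: initialize the ifnet structure of a freshly
 * created instance and attach the interface (and its bpf hook).
 */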
void
tunattach0(sc)
	struct tun_softc *sc;
{
	struct ifnet *ifp = (void *)sc;

	sc->tun_flags = TUN_INITED;

	ifp = &sc->tun_if;
	ifp->if_softc = sc;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
#endif
}

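/*
 * Destroy a tun instance: take it off the global list, wake up any
 * readers blocked on it, then detach and free the interface.
 */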
void
tun_clone_destroy(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = (void *)ifp;
	struct proc *p;

	simple_lock(&tun_softc_lock);
	simple_lock(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	simple_unlock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgrp) {
		if (tp->tun_pgrp > 0)
			gsignal(tp->tun_pgrp, SIGIO);
		else if ((p = pfind(-tp->tun_pgrp)) != NULL)
			psignal(p, SIGIO);
	}
	selwakeup(&tp->tun_rsel);

#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	free(tp, M_DEVBUF);
}

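/*
 * Look up the softc for a device. On success the softc is returned
 * with its tun_lock held; NULL is returned if the unit does not exist.
 */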
static struct tun_softc *
tun_find_unit(dev)
	dev_t dev;
{
	struct tun_softc *tp;
	int unit = minor(dev);

	simple_lock(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		simple_lock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	return (tp);
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
int
tunopen(dev, flag, mode, p)
	dev_t dev;
	int flag, mode;
	struct proc *p;
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if (NTUN < 1)
		return (ENXIO);

	tp = tun_find_unit(dev);

	if (!tp) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
	}

	if (!tp)
		return (ENXIO);

	if (tp->tun_flags & TUN_OPEN) {
		simple_unlock(&tp->tun_lock);
		return (EBUSY);
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;
	struct ifnet *ifp;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" before the close */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	/*
	 * junk all pending output
	 */
	s = splnet();
	IFQ_PURGE(&ifp->if_snd);
	splx(s);

	if (ifp->if_flags & IFF_UP) {
		s = splnet();
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
#endif
			}
		}
		splx(s);
	}
	tp->tun_pgrp = 0;
	selnotify(&tp->tun_rsel, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
	return (0);
}

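/*
 * Mark the interface up and running and note in the softc flags
 * whether local and destination addresses have been configured.
 */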
static void
tuninit(tp)
	struct tun_softc *tp;
{
	struct ifnet *ifp = &tp->tun_if;
	struct ifaddr *ifa;

	TUNDEBUG("%s: tuninit\n", ifp->if_xname);

	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			}
		}
#endif
	}

	return;
}

/*
 * Process an ioctl request.
 */
int
tun_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	int error = 0, s;
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);

	simple_lock(&tp->tun_lock);

	s = splnet();
	switch(cmd) {
	case SIOCSIFADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFDSTADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: destination address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	}
	case SIOCSIFFLAGS:
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
int
tun_output(ifp, m0, dst, rt)
	struct ifnet *ifp;
	struct mbuf *m0;
	struct sockaddr *dst;
	struct rtentry *rt;
{
	struct tun_softc *tp = ifp->if_softc;
	struct proc *p;
#ifdef INET
	int s;
	int error;
#endif
	ALTQ_DECL(struct altq_pktattr pktattr;)

	simple_lock(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		m_freem (m0);
		simple_unlock(&tp->tun_lock);
		return (EHOSTDOWN);
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = dst->sa_family;

		m.m_next = m0;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	switch(dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				simple_unlock(&tp->tun_lock);
				return (ENOBUFS);
			}
			bcopy(dst, mtod(m0, char *), dst->sa_len);
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
		if (error) {
			splx(s);
			ifp->if_collisions++;
			/* release the softc lock before bailing out */
			simple_unlock(&tp->tun_lock);
			return (error);
		}
		splx(s);
		ifp->if_opackets++;
		break;
#endif
	default:
		m_freem(m0);
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgrp) {
		if (tp->tun_pgrp > 0)
			gsignal(tp->tun_pgrp, SIGIO);
		else if ((p = pfind(-tp->tun_pgrp)) != NULL)
			psignal(p, SIGIO);
	}
	selnotify(&tp->tun_rsel, 0);
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int s;
	struct tun_softc *tp;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			s = splnet();
			if (tp->tun_if.if_flags & IFF_UP) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EBUSY);
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			splx(s);
			break;
		default:
			simple_unlock(&tp->tun_lock);
			return (EINVAL);
		}
		break;

	case TUNSLMODE:
		if (*(int *)data)
			tp->tun_flags |= TUN_PREPADDR;
		else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		s = splnet();
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		splx(s);
		break;

	case TIOCSPGRP:
		tp->tun_pgrp = *(int *)data;
		break;

	case TIOCGPGRP:
		*(int *)data = tp->tun_pgrp;
		break;

	default:
		simple_unlock(&tp->tun_lock);
		return (ENOTTY);
	}
	simple_unlock(&tp->tun_lock);
	return (0);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error=0, len, s, index;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	index = tp->tun_if.if_index;
	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		simple_unlock(&tp->tun_lock);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	s = splnet();
	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				splx(s);
				simple_unlock(&tp->tun_lock);
				return (EWOULDBLOCK);
			}
			tp->tun_flags |= TUN_RWAIT;
			simple_unlock(&tp->tun_lock);
			if (tsleep((caddr_t)tp, PZERO|PCATCH, "tunread", 0)) {
				splx(s);
				return (EINTR);
			} else {
				/*
				 * Maybe the interface was destroyed while
				 * we were sleeping, so let's ensure that
				 * we're looking at the same (valid) tun
				 * interface before looping.
				 */
				tp = tun_find_unit(dev);
				if (tp == NULL ||
				    tp->tun_if.if_index != index) {
					splx(s);
					if (tp)
						simple_unlock(&tp->tun_lock);
					return (ENXIO);
				}
			}
		}
	} while (m0 == 0);
	splx(s);

	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;
	simple_unlock(&tp->tun_lock);
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
int
tunwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	struct ifqueue *ifq;
	struct sockaddr dst;
	int isr, error=0, s, tlen, mlen;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (ENXIO);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			simple_unlock(&tp->tun_lock);
			return (EIO);
		}
		error = uiomove((caddr_t)&dst, sizeof(dst), uio);
		if (error) {
			/* don't trust dst if the copyin failed */
			simple_unlock(&tp->tun_lock);
			return (error);
		}
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			char discard;
			int n = dst.sa_len - sizeof(dst);
			while (n--)
				if ((error = uiomove(&discard, 1, uio)) != 0) {
					simple_unlock(&tp->tun_lock);
					return (error);
				}
		}
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		simple_unlock(&tp->tun_lock);
		return (EIO);
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
	default:
		simple_unlock(&tp->tun_lock);
		return (EAFNOSUPPORT);
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	mlen = MHLEN;

	top = 0;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod (m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET (m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem (top);
		ifp->if_ierrors++;
		simple_unlock(&tp->tun_lock);
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = AF_INET;

		m.m_next = top;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(ifp->if_bpf, &m);
	}
#endif

	s = splnet();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		splx(s);
		ifp->if_collisions++;
		m_freem(top);
		simple_unlock(&tp->tun_lock);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, top);
	splx(s);
	ifp->if_ipackets++;
	schednetisr(isr);
	simple_unlock(&tp->tun_lock);
	return (error);
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * when the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(ifp)
	struct ifnet *ifp;
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;
	struct proc *p;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	IFQ_POLL(&ifp->if_snd, m);
	if (m != NULL) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgrp) {
			if (tp->tun_pgrp > 0)
				gsignal(tp->tun_pgrp, SIGIO);
			else if ((p = pfind(-tp->tun_pgrp)) != NULL)
				psignal(p, SIGIO);
		}
		selwakeup(&tp->tun_rsel);
	}
}
#endif /* ALTQ */
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
int
tunpoll(dev, events, p)
	dev_t dev;
	int events;
	struct proc *p;
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s, revents = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (0);

	ifp = &tp->tun_if;

	s = splnet();
	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(p, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	splx(s);
	simple_unlock(&tp->tun_lock);
	return (revents);
}

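/*
 * Remove a knote from the device's read select/kqueue list.
 */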
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;
	int s;

	s = splnet();
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

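/*
 * kqueue read filter: report a packet waiting on the send queue,
 * with kn_data set to its length in bytes.
 */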
static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int s;

	s = splnet();
	IF_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return (0);
	}

	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	splx(s);
	return (1);
}

static const struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread };

static const struct filterops tun_seltrue_filtops =
	{ 1, NULL, filt_tunrdetach, filt_seltrue };

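/*
 * The cdevsw kqfilter interface: attach a read or write knote
 * to the device.
 */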
int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp = tun_find_unit(dev);
	struct klist *klist;
	int s;

	/* interface was "destroyed" already */
	if (tp == NULL)
		return (1);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		simple_unlock(&tp->tun_lock);
		return (1);
	}

	kn->kn_hook = tp;

	s = splnet();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);
	/* tun_find_unit() returned the softc locked; release it */
	simple_unlock(&tp->tun_lock);

	return (0);
}