/*	$NetBSD: if_tun.c,v 1.78 2005/12/11 23:05:25 thorpej Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.78 2005/12/11 23:05:25 thorpej Exp $");
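
/*
 * Typical userland usage, as a rough sketch (assuming the standard
 * /dev/tunN device nodes and an interface that has already been
 * configured up): each read() returns at most one queued packet and
 * each write() injects exactly one.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <net/if_tun.h>
 *
 *	int fd = open("/dev/tun0", O_RDWR);
 *	char pkt[TUNMTU];
 *	ssize_t n = read(fd, pkt, sizeof(pkt));
 *	if (n > 0)
 *		(void)write(fd, pkt, (size_t)n);
 */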

#include "opt_inet.h"
#include "opt_ns.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/signalvar.h>
#include <sys/conf.h>

#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>


#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <sys/time.h>
#include <net/bpf.h>
#endif

#include <net/if_tun.h>

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;
void	tunattach(int);

static LIST_HEAD(, tun_softc) tun_softc_list;
static LIST_HEAD(, tun_softc) tunz_softc_list;
static struct simplelock tun_softc_lock;

static int	tun_ioctl(struct ifnet *, u_long, caddr_t);
static int	tun_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *rt);
static int	tun_clone_create(struct if_clone *, int);
static int	tun_clone_destroy(struct ifnet *);

static struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0(struct tun_softc *);
static void tuninit(struct tun_softc *);
#ifdef ALTQ
static void tunstart(struct ifnet *);
#endif
static struct tun_softc *tun_find_unit(dev_t);
static struct tun_softc *tun_find_zunit(int);

static dev_type_open(tunopen);
static dev_type_close(tunclose);
static dev_type_read(tunread);
static dev_type_write(tunwrite);
static dev_type_ioctl(tunioctl);
static dev_type_poll(tunpoll);
static dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	tunopen, tunclose, tunread, tunwrite, tunioctl,
	nostop, notty, tunpoll, nommap, tunkqfilter,
};

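/*
 * Pseudo-device attach routine: initialize the softc lists and their
 * lock, and register the "tun" interface cloner.
 */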
void
tunattach(int unused)
{

	simple_lock_init(&tun_softc_lock);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
}

/*
 * Find driver instance from dev_t.
 * Call at splnet().
 * Returns with tp locked (if found).
 */
static struct tun_softc *
tun_find_unit(dev_t dev)
{
	struct tun_softc *tp;
	int unit = minor(dev);

	simple_lock(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		simple_lock(&tp->tun_lock);
	simple_unlock(&tun_softc_lock);

	return (tp);
}

/*
 * Find zombie driver instance by unit number.
 * Call at splnet().
 * Remove tp from list and return it unlocked (if found).
 */
static struct tun_softc *
tun_find_zunit(int unit)
{
	struct tun_softc *tp;

	simple_lock(&tun_softc_lock);
	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		LIST_REMOVE(tp, tun_list);
	simple_unlock(&tun_softc_lock);
#ifdef DIAGNOSTIC
	if (tp != NULL && (tp->tun_flags & (TUN_INITED|TUN_OPEN)) != TUN_OPEN)
		printf("tun%d: inconsistent flags: %x\n", unit, tp->tun_flags);
#endif

	return (tp);
}

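/*
 * Interface cloner create routine: either revive a zombie instance left
 * behind by tun_clone_destroy() or allocate a fresh softc, then attach
 * the ifnet and put the instance on the active list.
 */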
static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		/* Allocate a new instance */
		tp = malloc(sizeof(struct tun_softc), M_DEVBUF, M_WAITOK);
		(void)memset(tp, 0, sizeof(struct tun_softc));

		tp->tun_unit = unit;
		simple_lock_init(&tp->tun_lock);
	} else {
		/* Revive tunnel instance; clear ifp part */
		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
	}

	(void)snprintf(tp->tun_if.if_xname, sizeof(tp->tun_if.if_xname),
	    "%s%d", ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;

	simple_lock(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	simple_unlock(&tun_softc_lock);

	return (0);
}

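/*
 * Common ifnet setup: initialize the interface structure, attach it to
 * the network stack and (if configured) to bpf.  Used both for freshly
 * allocated instances and for revived zombies.
 */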
static void
tunattach0(struct tun_softc *tp)
{
	struct ifnet *ifp;

	ifp = &tp->tun_if;
	ifp->if_softc = tp;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_ibytes = 0;
	ifp->if_obytes = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
#endif
}

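/*
 * Interface cloner destroy routine: detach the interface and wake up any
 * sleepers.  If the character device is still open, keep the softc on
 * the zombie list so the storage survives until the last close.
 */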
static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	int s, zombie = 0;

	s = splnet();
	simple_lock(&tun_softc_lock);
	simple_lock(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close */
		zombie = 1;
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
	}
	simple_unlock(&tun_softc_lock);

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	selwakeup(&tp->tun_rsel);

	simple_unlock(&tp->tun_lock);
	splx(s);

#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	if (!zombie)
		free(tp, M_DEVBUF);

	return (0);
}

/*
 * tunnel open - must be superuser.  If the interface corresponding to
 * the minor number does not exist yet, it is created via the cloner.
 */
static int
tunopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct ifnet *ifp;
	struct tun_softc *tp;
	int s, error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	s = splnet();
	tp = tun_find_unit(dev);

	if (tp == NULL) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			error = ENXIO;
			goto out_nolock;
		}
	}

	if (tp->tun_flags & TUN_OPEN) {
		error = EBUSY;
		goto out;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int s;
	struct tun_softc *tp;
	struct ifnet *ifp;

	s = splnet();
	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		free(tp, M_DEVBUF);
		goto out_nolock;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_FOREACH(ifa, ifp) {
#ifdef INET
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
						? RTF_HOST
						: 0);
				}
#endif
			}
		}
	}
	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (0);
}

/*
 * Call at splnet() with tp locked.
 */
static void
tuninit(struct tun_softc *tp)
{
	struct ifnet *ifp = &tp->tun_if;
	struct ifaddr *ifa;

	TUNDEBUG("%s: tuninit\n", ifp->if_xname);

	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
	IFADDR_FOREACH(ifa, ifp) {
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			}
		}
#endif
	}

	return;
}

/*
 * Process an ioctl request.
 */
static int
tun_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int error = 0, s;
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);

	s = splnet();
	simple_lock(&tp->tun_lock);

	switch (cmd) {
	case SIOCSIFADDR:
		tuninit(tp);
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFDSTADDR:
		tuninit(tp);
		TUNDEBUG("%s: destination address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	}
	case SIOCSIFFLAGS:
		break;
	default:
		error = EINVAL;
	}

	simple_unlock(&tp->tun_lock);
	splx(s);
	return (error);
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
static int
tun_output(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int s;
	int error;
#ifdef INET
	int mlen;
#endif
	ALTQ_DECL(struct altq_pktattr pktattr;)

	s = splnet();
	simple_lock(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		m_freem (m0);
		error = EHOSTDOWN;
		goto out;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family, &pktattr);

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m0);
#endif

	switch(dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			bcopy(dst, mtod(m0, char *), dst->sa_len);
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		IFQ_ENQUEUE(&ifp->if_snd, m0, &pktattr, error);
		if (error) {
			ifp->if_collisions++;
			error = EAFNOSUPPORT;
			goto out;
		}
		mlen = m0->m_pkthdr.len;
		ifp->if_opackets++;
		ifp->if_obytes += mlen;
		break;
#endif
	default:
		m_freem(m0);
		error = EAFNOSUPPORT;
		goto out;
	}

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
		    NULL);

	selnotify(&tp->tun_rsel, 0);
out:
	simple_unlock(&tp->tun_lock);
	splx(s);
	return (error);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
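/*
 * Sketch of the ioctl side as seen from userland (illustrative only;
 * the tunnel-specific commands are defined in <net/if_tun.h>):
 *
 *	int on = 1, qlen;
 *	ioctl(fd, FIONBIO, &on);	non-blocking reads
 *	ioctl(fd, TUNSLMODE, &on);	prepend a sockaddr to each packet
 *	ioctl(fd, FIONREAD, &qlen);	length of the next queued packet
 */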
int
tunioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
	struct tun_softc *tp;
	int s, error = 0;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			if (tp->tun_if.if_flags & IFF_UP) {
				error = EBUSY;
				goto out;
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data)
			tp->tun_flags |= TUN_PREPADDR;
		else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(l->l_proc, &tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(l->l_proc, tp->tun_pgid, cmd, data);
		break;

	default:
		error = ENOTTY;
	}

out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error = 0, len, s, index;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	index = tp->tun_if.if_index;
	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				error = EWOULDBLOCK;
				goto out;
			}
			tp->tun_flags |= TUN_RWAIT;
			if (ltsleep((caddr_t)tp, PZERO|PCATCH|PNORELOCK,
			    "tunread", 0, &tp->tun_lock) != 0) {
				error = EINTR;
				goto out_nolock;
			} else {
				/*
				 * Maybe the interface was destroyed while
				 * we were sleeping, so let's ensure that
				 * we're looking at the same (valid) tun
				 * interface before looping.
				 */
				tp = tun_find_unit(dev);
				if (tp == NULL) {
					error = ENXIO;
					goto out_nolock;
				}
				if (tp->tun_if.if_index != index) {
					error = ENXIO;
					goto out;
				}
			}
		}
	} while (m0 == 0);

	simple_unlock(&tp->tun_lock);
	splx(s);

	/* Copy the mbuf chain */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, caddr_t), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;

	return (error);

out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
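/*
 * With TUN_PREPADDR set (TUNSLMODE), each write must begin with a
 * struct sockaddr giving the address family, followed by the packet;
 * otherwise the write is a bare packet, assumed to be IPv4, of at most
 * TUNMTU bytes.
 */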
int
tunwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	struct ifqueue *ifq;
	struct sockaddr dst;
	int isr, error = 0, s, tlen, mlen;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	/* Unlock until we've got the data */
	simple_unlock(&tp->tun_lock);
	splx(s);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((caddr_t)&dst, sizeof(dst), uio);
		if (error)
			goto out0;
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			char discard;
			int n = dst.sa_len - sizeof(dst);
			while (n--)
				if ((error = uiomove(&discard, 1, uio)) != 0) {
					goto out0;
				}
		}
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		error = EIO;
		goto out0;
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out0;
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		error = ENOBUFS;
		goto out0;
	}
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top != NULL)
			m_freem (top);
		ifp->if_ierrors++;
		goto out0;
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, AF_INET, top);
#endif

	s = splnet();
	simple_lock(&tp->tun_lock);
	if ((tp->tun_flags & TUN_INITED) == 0) {
		/* Interface was destroyed; drop the packet */
		m_freem(top);
		error = ENXIO;
		goto out;
	}
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		ifp->if_collisions++;
		m_freem(top);
		error = ENOBUFS;
		goto out;
	}

	IF_ENQUEUE(ifq, top);
	ifp->if_ipackets++;
	ifp->if_ibytes += tlen;
	schednetisr(isr);
out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
out0:
	return (error);
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * When the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 *
 * Should be called at splnet.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	simple_lock(&tp->tun_lock);
	if (!IF_IS_EMPTY(&ifp->if_snd)) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((caddr_t)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			fownsignal(tp->tun_pgid, SIGIO, POLL_OUT,
			    POLLOUT|POLLWRNORM, NULL);

		selwakeup(&tp->tun_rsel);
	}
	simple_unlock(&tp->tun_lock);
}
#endif /* ALTQ */
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
int
tunpoll(dev_t dev, int events, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s, revents = 0;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(l, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (revents);
}

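/*
 * kqueue filter detach routine: remove the knote from the softc's
 * read klist.
 */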
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;
	int s;

	s = splnet();
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

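/*
 * kqueue read filter: return true when a packet is queued on the
 * interface's send queue, with kn_data set to the length of the first
 * queued packet.
 */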
static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int s;

	s = splnet();
	IF_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return (0);
	}

	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	splx(s);
	return (1);
}

static const struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread };

static const struct filterops tun_seltrue_filtops =
	{ 1, NULL, filt_tunrdetach, filt_seltrue };

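/*
 * The cdevsw kqfilter interface: attach a knote to the read klist.
 * EVFILT_READ fires when a packet is queued; EVFILT_WRITE is always
 * true, matching tunpoll's behaviour for writes.
 */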
int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	struct klist *klist;
	int rv = 0, s;

	s = splnet();
	tp = tun_find_unit(dev);
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		rv = 1;
		goto out;
	}

	kn->kn_hook = tp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

out:
	simple_unlock(&tp->tun_lock);
out_nolock:
	splx(s);
	return (rv);
}