/*	$NetBSD: if_tun.c,v 1.124 2016/04/20 09:01:04 knakahara Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */
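
/*
 * Editorial note: the sketch below is NOT part of this driver.  It is a
 * minimal, illustrative user-space example of the read/write/poll mode of
 * operation described above: a process opens /dev/tunN, switches on the
 * 4-byte address-family header with TUNSIFHEAD, and then exchanges raw
 * packets over the descriptor.  The device path "/dev/tun0" and the error
 * handling style are assumptions, not anything this file defines.
 */
#if 0	/* illustrative user-space sketch -- never compiled into the kernel */
#include <sys/ioctl.h>
#include <net/if_tun.h>

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, on = 1;

	/* Opening the node creates tunN if needed (see tunopen() below). */
	fd = open("/dev/tun0", O_RDWR);
	if (fd == -1) {
		perror("open /dev/tun0");
		return 1;
	}
	/* Request the address-family header on every read(2)/write(2). */
	if (ioctl(fd, TUNSIFHEAD, &on) == -1)
		perror("TUNSIFHEAD");
	/* Configure tun0 with ifconfig(8), then read/write packets here. */
	close(fd);
	return 0;
}
#endif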

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.124 2016/04/20 09:01:04 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/signalvar.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>


#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif


#include <sys/time.h>
#include <net/bpf.h>

#include <net/if_tun.h>

#include "ioconf.h"

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;

static LIST_HEAD(, tun_softc) tun_softc_list;
static LIST_HEAD(, tun_softc) tunz_softc_list;
static kmutex_t tun_softc_lock;

static int	tun_ioctl(struct ifnet *, u_long, void *);
static int	tun_output(struct ifnet *, struct mbuf *,
			const struct sockaddr *, struct rtentry *rt);
static int	tun_clone_create(struct if_clone *, int);
static int	tun_clone_destroy(struct ifnet *);

static struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void tunattach0(struct tun_softc *);
static void tuninit(struct tun_softc *);
static void tun_i_softintr(void *);
static void tun_o_softintr(void *);
#ifdef ALTQ
static void tunstart(struct ifnet *);
#endif
static struct tun_softc *tun_find_unit(dev_t);
static struct tun_softc *tun_find_zunit(int);

static dev_type_open(tunopen);
static dev_type_close(tunclose);
static dev_type_read(tunread);
static dev_type_write(tunwrite);
static dev_type_ioctl(tunioctl);
static dev_type_poll(tunpoll);
static dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	.d_open = tunopen,
	.d_close = tunclose,
	.d_read = tunread,
	.d_write = tunwrite,
	.d_ioctl = tunioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tunpoll,
	.d_mmap = nommap,
	.d_kqfilter = tunkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

void
tunattach(int unused)
{

	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
}

/*
 * Find driver instance from dev_t.
 * Returns with tp locked (if found).
 */
static struct tun_softc *
tun_find_unit(dev_t dev)
{
	struct tun_softc *tp;
	int unit = minor(dev);

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		mutex_enter(&tp->tun_lock);
	mutex_exit(&tun_softc_lock);

	return (tp);
}

/*
 * Find zombie driver instance by unit number.
 * Remove tp from list and return it unlocked (if found).
 */
static struct tun_softc *
tun_find_zunit(int unit)
{
	struct tun_softc *tp;

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		LIST_REMOVE(tp, tun_list);
	mutex_exit(&tun_softc_lock);
#ifdef DIAGNOSTIC
	if (tp != NULL && (tp->tun_flags & (TUN_INITED|TUN_OPEN)) != TUN_OPEN)
		printf("tun%d: inconsistent flags: %x\n", unit, tp->tun_flags);
#endif

	return (tp);
}

static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		/* Allocate a new instance */
		tp = malloc(sizeof(*tp), M_DEVBUF, M_WAITOK|M_ZERO);

		tp->tun_unit = unit;
		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
		selinit(&tp->tun_rsel);
		selinit(&tp->tun_wsel);
	} else {
		/* Revive tunnel instance; clear ifp part */
		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
	}

	if_initname(&tp->tun_if, ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;
	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);

	mutex_enter(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	mutex_exit(&tun_softc_lock);

	return (0);
}

static void
tunattach0(struct tun_softc *tp)
{
	struct ifnet *ifp;

	ifp = &tp->tun_if;
	ifp->if_softc = tp;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_ibytes = 0;
	ifp->if_obytes = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
}

static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	int zombie = 0;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close */
		zombie = 1;
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
	}
	mutex_exit(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((void *)tp);
	}
	selnotify(&tp->tun_rsel, 0, 0);

	mutex_exit(&tp->tun_lock);

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		free(tp, M_DEVBUF);
	}

	return (0);
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static int
tunopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int	error;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
	if (error)
		return (error);

	tp = tun_find_unit(dev);

	if (tp == NULL) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			error = ENXIO;
			goto out_nolock;
		}
	}

	if (tp->tun_flags & TUN_OPEN) {
		error = EBUSY;
		goto out;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	return (error);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;

	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		free(tp, M_DEVBUF);
		goto out_nolock;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
#endif
			}
		}
	}
out_nolock:
	return (0);
}

/*
 * Call at splnet().
 */
static void
tuninit(struct tun_softc *tp)
{
	struct ifnet *ifp = &tp->tun_if;
	struct ifaddr *ifa;

	TUNDEBUG("%s: tuninit\n", ifp->if_xname);

	mutex_enter(&tp->tun_lock);
	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
	IFADDR_FOREACH(ifa, ifp) {
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			}
		}
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin;

			sin = (struct sockaddr_in6 *)ifa->ifa_addr;
			if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = (struct sockaddr_in6 *)ifa->ifa_dstaddr;
				if (sin &&
				    !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
					tp->tun_flags |= TUN_DSTADDR;
			} else
				tp->tun_flags &= ~TUN_DSTADDR;
		}
#endif /* INET6 */
	}
	mutex_exit(&tp->tun_lock);
}

/*
 * Process an ioctl request.
 */
static int
tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int	error = 0, s;
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		tuninit(tp);
		ifa->ifa_rtrequest = p2p_rtrequest;
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return (error);
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
static int
tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
    struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int	s;
	int	error;
#if defined(INET) || defined(INET6)
	int	mlen;
	uint32_t *af;
#endif

	s = splnet();
	mutex_enter(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);

	bpf_mtap_af(ifp, dst->sa_family, m0);

	switch(dst->sa_family) {
#ifdef INET6
	case AF_INET6:
#endif
#ifdef INET
	case AF_INET:
#endif
#if defined(INET) || defined(INET6)
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			bcopy(dst, mtod(m0, char *), dst->sa_len);
		}

		if (tp->tun_flags & TUN_IFHEAD) {
			/* Prepend the address family */
			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			af = mtod(m0,uint32_t *);
			*af = htonl(dst->sa_family);
		} else {
#ifdef INET
			if (dst->sa_family != AF_INET)
#endif
			{
				error = EAFNOSUPPORT;
				goto out;
			}
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		IFQ_ENQUEUE(&ifp->if_snd, m0, error);
		if (error) {
			ifp->if_collisions++;
			error = EAFNOSUPPORT;
			m0 = NULL;
			goto out;
		}
		mlen = m0->m_pkthdr.len;
		ifp->if_opackets++;
		ifp->if_obytes += mlen;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out;
	}

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((void *)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		softint_schedule(tp->tun_isih);

	selnotify(&tp->tun_rsel, 0, 0);
out:
	mutex_exit(&tp->tun_lock);
	splx(s);

	if (error && m0) {
		m_freem(m0);
	}
	return 0;
}
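
/*
 * Editorial note: the sketch below is NOT part of the driver.  It only
 * illustrates the framing tun_output() produces for the user-space
 * reader: with TUNSLMODE (TUN_PREPADDR) each packet is preceded by a
 * struct sockaddr, with TUNSIFHEAD (TUN_IFHEAD) by a 4-byte address
 * family in network byte order, and with neither it is a bare IPv4
 * packet.  The helper name and its buffer handling are assumptions made
 * for illustration only.
 */
#if 0	/* illustrative user-space sketch -- never compiled into the kernel */
#include <sys/socket.h>

#include <arpa/inet.h>
#include <string.h>

/* Return the address family of a packet read() from a TUNSIFHEAD tun. */
static int
example_af_of_packet(const unsigned char *buf, size_t len)
{
	uint32_t af;

	if (len < sizeof(af))
		return -1;			/* short read */
	memcpy(&af, buf, sizeof(af));		/* header written by tun_output() */
	return (int)ntohl(af);			/* AF_INET or AF_INET6 */
}
#endif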

static void
tun_i_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
		    NULL);
}

static void
tun_o_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
		    NULL);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct tun_softc *tp;
	int s, error = 0;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			if (tp->tun_if.if_flags & IFF_UP) {
				error = EBUSY;
				goto out;
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data) {
			tp->tun_flags |= TUN_PREPADDR;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case TUNSIFHEAD:
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_PREPADDR;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;

	case TUNGIFHEAD:
		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(&tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(tp->tun_pgid, cmd, data);
		break;

	default:
		error = ENOTTY;
	}

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}
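
/*
 * Editorial note: the sketch below is NOT part of the driver.  It shows
 * how a user process typically drives the ioctls handled above: TUNSLMODE
 * and TUNSIFHEAD select mutually exclusive framings (see tunioctl()),
 * FIONBIO turns on non-blocking reads, and FIOASYNC plus FIOSETOWN arrange
 * for SIGIO delivery.  The descriptor "fd" is assumed to be an open tun
 * device; the helper name and error handling style are assumptions.
 */
#if 0	/* illustrative user-space sketch -- never compiled into the kernel */
#include <sys/ioctl.h>
#include <net/if_tun.h>

#include <unistd.h>

static int
example_setup_tun_fd(int fd)
{
	int on = 1;
	int me = (int)getpid();

	if (ioctl(fd, TUNSIFHEAD, &on) == -1)	/* AF header, clears TUNSLMODE */
		return -1;
	if (ioctl(fd, FIONBIO, &on) == -1)	/* non-blocking reads */
		return -1;
	if (ioctl(fd, FIOASYNC, &on) == -1)	/* SIGIO on packet arrival */
		return -1;
	if (ioctl(fd, FIOSETOWN, &me) == -1)	/* deliver SIGIO to us */
		return -1;
	return 0;
}
#endif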

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet	*ifp;
	struct mbuf	*m, *m0;
	int	error = 0, len, s, index;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	index = tp->tun_if.if_index;
	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				error = EWOULDBLOCK;
				goto out;
			}
			tp->tun_flags |= TUN_RWAIT;
			if (mtsleep((void *)tp, PZERO|PCATCH|PNORELOCK,
			    "tunread", 0, &tp->tun_lock) != 0) {
				error = EINTR;
				goto out_nolock;
			} else {
				/*
				 * Maybe the interface was destroyed while
				 * we were sleeping, so let's ensure that
				 * we're looking at the same (valid) tun
				 * interface before looping.
				 */
				tp = tun_find_unit(dev);
				if (tp == NULL) {
					error = ENXIO;
					goto out_nolock;
				}
				if (tp->tun_if.if_index != index) {
					error = ENXIO;
					goto out;
				}
			}
		}
	} while (m0 == 0);

	mutex_exit(&tp->tun_lock);
	splx(s);

	/* Copy the mbuf chain */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, void *), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;

	return (error);

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	splx(s);
	return (error);
}
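
/*
 * Editorial note: the sketch below is NOT part of the driver.  It shows
 * the read discipline tunread() implies: each read(2) returns at most one
 * queued packet, so the buffer should hold a whole packet (the interface
 * MTU plus the optional header), and with FIONBIO a read on an empty
 * queue fails with EWOULDBLOCK.  The buffer size, helper name, and loop
 * structure are assumptions made for illustration.
 */
#if 0	/* illustrative user-space sketch -- never compiled into the kernel */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static void
example_read_loop(int fd)
{
	unsigned char buf[4096];	/* comfortably above MTU + AF header */
	ssize_t n;

	for (;;) {
		n = read(fd, buf, sizeof(buf));	/* one packet per read */
		if (n == -1) {
			if (errno == EWOULDBLOCK || errno == EINTR)
				continue;	/* real code would poll(2) here */
			perror("read tun");
			break;
		}
		printf("got %zd byte packet\n", n);
	}
}
#endif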

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
int
tunwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet	*ifp;
	struct mbuf	*top, **mp, *m;
	pktqueue_t	*pktq;
	struct sockaddr	dst;
	int	error = 0, s, tlen, mlen;
	uint32_t family;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		error = ENXIO;
		goto out_nolock;
	}

	/* Unlock until we've got the data */
	mutex_exit(&tp->tun_lock);
	splx(s);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			char discard;
			int n = dst.sa_len - sizeof(dst);
			while (n--)
				if ((error = uiomove(&discard, 1, uio)) != 0) {
					goto out0;
				}
		}
	} else if (tp->tun_flags & TUN_IFHEAD) {
		if (uio->uio_resid < sizeof(family)){
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&family, sizeof(family), uio);
		dst.sa_family = ntohl(family);
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		error = EIO;
		goto out0;
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		pktq = ip6_pktq;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out0;
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		error = ENOBUFS;
		goto out0;
	}
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, void *), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top != NULL)
			m_freem (top);
		ifp->if_ierrors++;
		goto out0;
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;

	bpf_mtap_af(ifp, dst.sa_family, top);

	s = splnet();
	mutex_enter(&tp->tun_lock);
	if ((tp->tun_flags & TUN_INITED) == 0) {
		/* Interface was destroyed */
		error = ENXIO;
		goto out;
	}
	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
		ifp->if_collisions++;
		mutex_exit(&tp->tun_lock);
		error = ENOBUFS;
		m_freem(top);
		goto out_nolock;
	}
	ifp->if_ipackets++;
	ifp->if_ibytes += tlen;
out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	splx(s);
out0:
	return (error);
}
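
/*
 * Editorial note: the sketch below is NOT part of the driver.  It shows
 * the "atomic write is a packet" rule enforced by tunwrite(): the whole
 * packet must arrive in a single write, preceded by a 4-byte address
 * family in network byte order when TUNSIFHEAD is in effect (or by a
 * struct sockaddr when TUNSLMODE is).  The use of writev(2) to join the
 * header and payload, and the helper name, are assumptions made for
 * illustration only.
 */
#if 0	/* illustrative user-space sketch -- never compiled into the kernel */
#include <sys/socket.h>
#include <sys/uio.h>

#include <arpa/inet.h>

/* Inject one IPv4 packet into a tun device opened with TUNSIFHEAD set. */
static ssize_t
example_write_packet(int fd, void *pkt, size_t len)
{
	uint32_t af = htonl(AF_INET);
	struct iovec iov[2] = {
		{ .iov_base = &af,  .iov_len = sizeof(af) },
		{ .iov_base = pkt,  .iov_len = len },
	};

	/* Header and payload must go down in one atomic write. */
	return writev(fd, iov, 2);
}
#endif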

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * when the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 *
 * Should be called at splnet.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	mutex_enter(&tp->tun_lock);
	if (!IF_IS_EMPTY(&ifp->if_snd)) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			wakeup((void *)tp);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			softint_schedule(tp->tun_osih);

		selnotify(&tp->tun_rsel, 0, 0);
	}
	mutex_exit(&tp->tun_lock);
}
#endif /* ALTQ */
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
int
tunpoll(dev_t dev, int events, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet	*ifp;
	int	s, revents = 0;

	s = splnet();
	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(l, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	mutex_exit(&tp->tun_lock);
out_nolock:
	splx(s);
	return (revents);
}
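
/*
 * Editorial note: the sketch below is NOT part of the driver.  It shows
 * the behaviour tunpoll() documents above: POLLIN becomes ready when a
 * packet is queued for the reader, while POLLOUT is always ready because
 * writes never block.  The timeout value and helper name are assumptions.
 */
#if 0	/* illustrative user-space sketch -- never compiled into the kernel */
#include <poll.h>
#include <stdio.h>

static int
example_wait_for_packet(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n;

	n = poll(&pfd, 1, 5000);	/* wait up to five seconds */
	if (n > 0 && (pfd.revents & POLLIN))
		return 1;		/* a packet is ready to read(2) */
	if (n == -1)
		perror("poll tun");
	return 0;
}
#endif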

static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;
	int s;

	s = splnet();
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int s;

	s = splnet();
	IF_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return (0);
	}

	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	splx(s);
	return (1);
}

static const struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread };

static const struct filterops tun_seltrue_filtops =
	{ 1, NULL, filt_tunrdetach, filt_seltrue };

int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	struct klist *klist;
	int rv = 0, s;

	s = splnet();
	tp = tun_find_unit(dev);
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		rv = EINVAL;
		goto out;
	}

	kn->kn_hook = tp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	splx(s);
	return (rv);
}