/*	$NetBSD: if_tun.c,v 1.138 2017/01/29 18:30:33 maya Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo (at) cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

/*
 * tun - tunnel software network interface.
 */
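
/*
 * Rough userland usage sketch (illustrative only; the device node name,
 * open flags and the address configuration step are assumptions, not
 * part of this file):
 *
 *	int fd = open("/dev/tun0", O_RDWR);	// attach to tun0, creating it if needed
 *	// assign addresses, e.g. with ifconfig(8), so the interface becomes ready
 *	char buf[TUNMTU];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// one outbound packet per read
 *	(void)write(fd, buf, (size_t)n);	// one inbound packet per write
 */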

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.138 2017/01/29 18:30:33 maya Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/if_tun.h>

#include "ioconf.h"

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;

static LIST_HEAD(, tun_softc) tun_softc_list;
static LIST_HEAD(, tun_softc) tunz_softc_list;
static kmutex_t tun_softc_lock;

static int	tun_ioctl(struct ifnet *, u_long, void *);
static int	tun_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, const struct rtentry *rt);
static int	tun_clone_create(struct if_clone *, int);
static int	tun_clone_destroy(struct ifnet *);

static struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void	tunattach0(struct tun_softc *);
static void	tun_enable(struct tun_softc *, const struct ifaddr *);
static void	tun_i_softintr(void *);
static void	tun_o_softintr(void *);
#ifdef ALTQ
static void	tunstart(struct ifnet *);
#endif
static struct tun_softc *tun_find_unit(dev_t);
static struct tun_softc *tun_find_zunit(int);

static dev_type_open(tunopen);
static dev_type_close(tunclose);
static dev_type_read(tunread);
static dev_type_write(tunwrite);
static dev_type_ioctl(tunioctl);
static dev_type_poll(tunpoll);
static dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	.d_open = tunopen,
	.d_close = tunclose,
	.d_read = tunread,
	.d_write = tunwrite,
	.d_ioctl = tunioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tunpoll,
	.d_mmap = nommap,
	.d_kqfilter = tunkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

#ifdef _MODULE
devmajor_t tun_bmajor = -1, tun_cmajor = -1;
#endif

void
tunattach(int unused)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in tuninit() below.
	 */
}

static void
tuninit(void)
{

	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
#ifdef _MODULE
	devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
}

static int
tundetach(void)
{
	int error = 0;

	if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list))
		error = EBUSY;

#ifdef _MODULE
	if (error == 0)
		error = devsw_detach(NULL, &tun_cdevsw);
#endif
	if (error == 0) {
		if_clone_detach(&tun_cloner);
		mutex_destroy(&tun_softc_lock);
	}

	return error;
}

/*
 * Find driver instance from dev_t.
 * Returns with tp locked (if found).
 */
static struct tun_softc *
tun_find_unit(dev_t dev)
{
	struct tun_softc *tp;
	int unit = minor(dev);

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		mutex_enter(&tp->tun_lock);
	mutex_exit(&tun_softc_lock);

	return tp;
}

/*
 * Find zombie driver instance by unit number.
 * Remove tp from list and return it unlocked (if found).
 */
static struct tun_softc *
tun_find_zunit(int unit)
{
	struct tun_softc *tp;

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		LIST_REMOVE(tp, tun_list);
	mutex_exit(&tun_softc_lock);
	KASSERTMSG(!tp || (tp->tun_flags & (TUN_INITED|TUN_OPEN)) == TUN_OPEN,
	    "tun%d: inconsistent flags: %x", unit, tp->tun_flags);

	return tp;
}

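/*
 * if_clone create handler: revive a zombie instance left on
 * tunz_softc_list by tun_clone_destroy() if one exists for this unit,
 * otherwise allocate and initialize a fresh softc, then attach the
 * interface.
 */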
static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		tp = kmem_zalloc(sizeof(*tp), KM_SLEEP);

		tp->tun_unit = unit;
		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
		cv_init(&tp->tun_cv, "tunread");
		selinit(&tp->tun_rsel);
		selinit(&tp->tun_wsel);
	} else {
		/* Revive tunnel instance; clear ifp part */
		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
	}

	if_initname(&tp->tun_if, ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;
	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);

	mutex_enter(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	mutex_exit(&tun_softc_lock);

	return 0;
}

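/*
 * Common interface setup: initialize the ifnet, attach it to the
 * network stack, and register it with bpf.
 */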
static void
tunattach0(struct tun_softc *tp)
{
	struct ifnet *ifp;

	ifp = &tp->tun_if;
	ifp->if_softc = tp;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_ibytes = 0;
	ifp->if_obytes = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
}

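/*
 * if_clone destroy handler.  If the character device is still open,
 * keep the softc on the zombie list so the storage survives until the
 * last close (see tunclose()); otherwise tear everything down here.
 */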
static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	bool zombie = false;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close. */
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
		zombie = true;
	}
	mutex_exit(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	selnotify(&tp->tun_rsel, 0, 0);

	mutex_exit(&tp->tun_lock);

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		kmem_free(tp, sizeof(*tp));
	}

	return 0;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static int
tunopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
	if (error)
		return error;

	tp = tun_find_unit(dev);

	if (tp == NULL) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			return ENXIO;
		}
	}

	if (tp->tun_flags & TUN_OPEN) {
		mutex_exit(&tp->tun_lock);
		return EBUSY;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);

	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;

	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		kmem_free(tp, sizeof(*tp));
		return 0;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, 0);

	TUNDEBUG("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_READER_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
#endif
			}
		}
	}
out_nolock:
	return 0;
}

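/*
 * Called when an address is assigned: mark the interface up and
 * running and record whether a local address (TUN_IASET) and, for
 * point-to-point mode, a destination address (TUN_DSTADDR) have been
 * configured.
 */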
static void
tun_enable(struct tun_softc *tp, const struct ifaddr *ifa)
{
	struct ifnet *ifp = &tp->tun_if;

	TUNDEBUG("%s: %s\n", __func__, ifp->if_xname);

	mutex_enter(&tp->tun_lock);
	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);

	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET: {
		struct sockaddr_in *sin;

		sin = satosin(ifa->ifa_addr);
		if (sin && sin->sin_addr.s_addr)
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin(ifa->ifa_dstaddr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_DSTADDR;
		}
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *sin;

		sin = satosin6(ifa->ifa_addr);
		if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin6(ifa->ifa_dstaddr);
			if (sin && !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
				tp->tun_flags |= TUN_DSTADDR;
		} else
			tp->tun_flags &= ~TUN_DSTADDR;
		break;
	}
#endif /* INET6 */
	default:
		break;
	}
	mutex_exit(&tp->tun_lock);
}

/*
 * Process an ioctl request.
 */
static int
tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		tun_enable(tp, ifa);
		ifa->ifa_rtrequest = p2p_rtrequest;
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	return error;
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
static int
tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
#if defined(INET) || defined(INET6)
	int mlen;
	uint32_t *af;
#endif

	mutex_enter(&tp->tun_lock);
	TUNDEBUG("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		error = EHOSTDOWN;
		mutex_exit(&tp->tun_lock);
		goto out;
	}
	// XXXrmind
	mutex_exit(&tp->tun_lock);

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);

	bpf_mtap_af(ifp, dst->sa_family, m0);

	switch (dst->sa_family) {
#ifdef INET6
	case AF_INET6:
#endif
#ifdef INET
	case AF_INET:
#endif
#if defined(INET) || defined(INET6)
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			memcpy(mtod(m0, char *), dst, dst->sa_len);
		}

		if (tp->tun_flags & TUN_IFHEAD) {
			/* Prepend the address family */
			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			af = mtod(m0, uint32_t *);
			*af = htonl(dst->sa_family);
		} else {
#ifdef INET
			if (dst->sa_family != AF_INET)
#endif
			{
				error = EAFNOSUPPORT;
				goto out;
			}
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		IFQ_ENQUEUE(&ifp->if_snd, m0, error);
		if (error) {
			ifp->if_collisions++;
			error = EAFNOSUPPORT;
			m0 = NULL;
			goto out;
		}
		mlen = m0->m_pkthdr.len;
		ifp->if_opackets++;
		ifp->if_obytes += mlen;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out;
	}

	mutex_enter(&tp->tun_lock);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		softint_schedule(tp->tun_isih);

	selnotify(&tp->tun_rsel, 0, 0);

	mutex_exit(&tp->tun_lock);
out:
	if (error && m0) {
		m_freem(m0);
	}
	return 0;
}

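/*
 * Soft interrupt handlers: deliver SIGIO to the owning process or
 * process group from a safe context when the descriptor is in
 * asynchronous (FIOASYNC) mode.
 */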
static void
tun_i_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
		    NULL);
}

static void
tun_o_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
		    NULL);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct tun_softc *tp;
	int error = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			if (tp->tun_if.if_flags & IFF_UP) {
				error = EBUSY;
				goto out;
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data) {
			tp->tun_flags |= TUN_PREPADDR;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case TUNSIFHEAD:
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_PREPADDR;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;

	case TUNGIFHEAD:
		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(&tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(tp->tun_pgid, cmd, data);
		break;

	default:
		error = ENOTTY;
	}

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error = 0, len;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	ifp = &tp->tun_if;

	TUNDEBUG("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				error = EWOULDBLOCK;
				goto out;
			}
			tp->tun_flags |= TUN_RWAIT;
			if (cv_wait_sig(&tp->tun_cv, &tp->tun_lock)) {
				error = EINTR;
				goto out;
			}
		}
	} while (m0 == 0);

	mutex_exit(&tp->tun_lock);

	/* Copy the mbuf chain */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, void *), len, uio);
		m0 = m = m_free(m0);
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;

	return error;

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
int
tunwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	pktqueue_t *pktq;
	struct sockaddr dst;
	int error = 0, tlen, mlen;
	uint32_t family;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return ENXIO;
	}

	/* Unlock until we've got the data */
	mutex_exit(&tp->tun_lock);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (dst.sa_len > sizeof(dst)) {
			/* sa_len exceeds struct sockaddr; skip the excess. */
			int n = dst.sa_len - sizeof(dst);
			while (n--) {
				char discard;
				error = uiomove(&discard, 1, uio);
				if (error) {
					goto out0;
				}
			}
		}
	} else if (tp->tun_flags & TUN_IFHEAD) {
		if (uio->uio_resid < sizeof(family)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&family, sizeof(family), uio);
		dst.sa_family = ntohl(family);
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		error = EIO;
		goto out0;
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		pktq = ip6_pktq;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out0;
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		return ENOBUFS;
	}
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, void *), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top != NULL)
			m_freem(top);
		ifp->if_ierrors++;
		goto out0;
	}

	top->m_pkthdr.len = tlen;
	m_set_rcvif(top, ifp);

	bpf_mtap_af(ifp, dst.sa_family, top);

	mutex_enter(&tp->tun_lock);
	if ((tp->tun_flags & TUN_INITED) == 0) {
		/* Interface was destroyed */
		error = ENXIO;
		goto out;
	}
	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
		ifp->if_collisions++;
		mutex_exit(&tp->tun_lock);
		error = ENOBUFS;
		m_freem(top);
		goto out0;
	}
	ifp->if_ipackets++;
	ifp->if_ibytes += tlen;
out:
	mutex_exit(&tp->tun_lock);
out0:
	return error;
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * When the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	mutex_enter(&tp->tun_lock);
	if (!IF_IS_EMPTY(&ifp->if_snd)) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			cv_broadcast(&tp->tun_cv);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			softint_schedule(tp->tun_osih);

		selnotify(&tp->tun_rsel, 0, 0);
	}
	mutex_exit(&tp->tun_lock);
}
#endif /* ALTQ */

/*
 * tunpoll - the poll interface; this is really only useful for reads.
 * The write detect always returns true: write never blocks anyway,
 * it either accepts the packet or drops it.
 */
int
tunpoll(dev_t dev, int events, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int revents = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return 0;
	}
	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(l, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	mutex_exit(&tp->tun_lock);

	return revents;
}

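/*
 * Kqueue support.  EVFILT_READ reports the length of the first packet
 * queued on the interface send queue; EVFILT_WRITE uses filt_seltrue
 * and so always reports ready, matching tunpoll().
 */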
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;

	mutex_enter(&tp->tun_lock);
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&tp->tun_lock);
}

static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;

	mutex_enter(&tp->tun_lock);
	IF_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		mutex_exit(&tp->tun_lock);
		return 0;
	}

	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	mutex_exit(&tp->tun_lock);

	return 1;
}

static const struct filterops tunread_filtops =
	{ 1, NULL, filt_tunrdetach, filt_tunread };

static const struct filterops tun_seltrue_filtops =
	{ 1, NULL, filt_tunrdetach, filt_seltrue };

int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	struct klist *klist;
	int rv = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		rv = EINVAL;
		goto out;
	}

	kn->kn_hook = tp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	return rv;
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, tun, "")