/*	$NetBSD: if_tun.c,v 1.154 2019/03/25 09:07:24 pgoyette Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

/*
 * tun - tunnel software network interface.
 */
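
/*
 * Typical use, as a purely illustrative sketch (device and interface
 * names below are examples, not part of this file): a user process
 * opens /dev/tunN, brings the matching tunN interface up, and then
 * exchanges one packet per read(2)/write(2) on the descriptor.
 *
 *	int fd = open("/dev/tun0", O_RDWR);
 *	// configure tun0 (addresses, up) via ifconfig or ioctl
 *	char pkt[TUNMTU];
 *	ssize_t n = read(fd, pkt, sizeof(pkt));	// one packet per read
 *	(void)write(fd, pkt, n);		// one packet per write
 */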

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.154 2019/03/25 09:07:24 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/if_tun.h>

#include "ioconf.h"

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;

static LIST_HEAD(, tun_softc) tun_softc_list;
static LIST_HEAD(, tun_softc) tunz_softc_list;
static kmutex_t tun_softc_lock;

static int	tun_ioctl(struct ifnet *, u_long, void *);
static int	tun_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, const struct rtentry *rt);
static int	tun_clone_create(struct if_clone *, int);
static int	tun_clone_destroy(struct ifnet *);

static struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void	tunattach0(struct tun_softc *);
static void	tun_enable(struct tun_softc *, const struct ifaddr *);
static void	tun_i_softintr(void *);
static void	tun_o_softintr(void *);
#ifdef ALTQ
static void	tunstart(struct ifnet *);
#endif
static struct tun_softc *tun_find_unit(dev_t);
static struct tun_softc *tun_find_zunit(int);

static dev_type_open(tunopen);
static dev_type_close(tunclose);
static dev_type_read(tunread);
static dev_type_write(tunwrite);
static dev_type_ioctl(tunioctl);
static dev_type_poll(tunpoll);
static dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	.d_open = tunopen,
	.d_close = tunclose,
	.d_read = tunread,
	.d_write = tunwrite,
	.d_ioctl = tunioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tunpoll,
	.d_mmap = nommap,
	.d_kqfilter = tunkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

#ifdef _MODULE
devmajor_t tun_bmajor = -1, tun_cmajor = -1;
#endif

void
tunattach(int unused)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in tuninit() below.
	 */
}

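/*
 * Module initialization: set up the softc lists and their lock,
 * register the interface cloner and, when built as a module, the
 * character device entry points.
 */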
static void
tuninit(void)
{

	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
#ifdef _MODULE
	devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
}

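/*
 * Module detach: undo tuninit().  If the devsw cannot be detached, or
 * any tun instance (active or zombie) still exists, re-register what
 * was already torn down and fail.
 */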
static int
tundetach(void)
{
	int error = 0;

	if_clone_detach(&tun_cloner);
#ifdef _MODULE
	error = devsw_detach(NULL, &tun_cdevsw);
	if (error != 0) {
		if_clone_attach(&tun_cloner);
		return error;
	}
#endif

	if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list)) {
#ifdef _MODULE
		devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
		if_clone_attach(&tun_cloner);
		return EBUSY;
	}

	mutex_destroy(&tun_softc_lock);

	return 0;
}

/*
 * Find driver instance from dev_t.
 * Returns with tp locked (if found).
 */
static struct tun_softc *
tun_find_unit(dev_t dev)
{
	struct tun_softc *tp;
	int unit = minor(dev);

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		mutex_enter(&tp->tun_lock);
	mutex_exit(&tun_softc_lock);

	return tp;
}

/*
 * Find zombie driver instance by unit number.
 * Remove tp from list and return it unlocked (if found).
 */
static struct tun_softc *
tun_find_zunit(int unit)
{
	struct tun_softc *tp;

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		LIST_REMOVE(tp, tun_list);
	mutex_exit(&tun_softc_lock);
	KASSERTMSG(!tp || (tp->tun_flags & (TUN_INITED|TUN_OPEN)) == TUN_OPEN,
	    "tun%d: inconsistent flags: %x", unit, tp->tun_flags);

	return tp;
}

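/*
 * Create a tun instance for the given unit.  If a zombie with the same
 * unit is still held open, revive it instead of allocating a new softc.
 */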
static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		tp = kmem_zalloc(sizeof(*tp), KM_SLEEP);

		tp->tun_unit = unit;
		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
		cv_init(&tp->tun_cv, "tunread");
		selinit(&tp->tun_rsel);
		selinit(&tp->tun_wsel);
	} else {
		/* Revive tunnel instance; clear ifp part */
		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
	}

	if_initname(&tp->tun_if, ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;
	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);

	mutex_enter(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	mutex_exit(&tun_softc_lock);

	return 0;
}

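/*
 * Common ifnet setup shared by create and revive: fill in the interface
 * callbacks and defaults, attach the interface and register it with bpf.
 */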
static void
tunattach0(struct tun_softc *tp)
{
	struct ifnet *ifp;

	ifp = &tp->tun_if;
	ifp->if_softc = tp;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_collisions = 0;
	ifp->if_ierrors = 0;
	ifp->if_oerrors = 0;
	ifp->if_ipackets = 0;
	ifp->if_opackets = 0;
	ifp->if_ibytes = 0;
	ifp->if_obytes = 0;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
}

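/*
 * Destroy a cloned interface.  If the character device is still open,
 * the softc is parked on the zombie list and freed by the last close;
 * otherwise it is torn down here.
 */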
static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	bool zombie = false;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close. */
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
		zombie = true;
	}
	mutex_exit(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	mutex_exit(&tp->tun_lock);

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		cv_destroy(&tp->tun_cv);
		kmem_free(tp, sizeof(*tp));
	}

	return 0;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static int
tunopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
	if (error)
		return error;

	tp = tun_find_unit(dev);

	if (tp == NULL) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			return ENXIO;
		}
	}

	if (tp->tun_flags & TUN_OPEN) {
		mutex_exit(&tp->tun_lock);
		return EBUSY;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);

	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;

	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		kmem_free(tp, sizeof(*tp));
		return 0;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_READER_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
#endif
			}
		}
	}
out_nolock:
	return 0;
}

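/*
 * Note which source/destination addresses are configured and mark the
 * interface up and running; called from tun_ioctl() when an address is
 * assigned (SIOCINITIFADDR).
 */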
static void
tun_enable(struct tun_softc *tp, const struct ifaddr *ifa)
{
	struct ifnet *ifp = &tp->tun_if;

	TUNDEBUG("%s: %s\n", __func__, ifp->if_xname);

	mutex_enter(&tp->tun_lock);
	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);

	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET: {
		struct sockaddr_in *sin;

		sin = satosin(ifa->ifa_addr);
		if (sin && sin->sin_addr.s_addr)
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin(ifa->ifa_dstaddr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_DSTADDR;
		}
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *sin;

		sin = satosin6(ifa->ifa_addr);
		if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin6(ifa->ifa_dstaddr);
			if (sin && !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
				tp->tun_flags |= TUN_DSTADDR;
		} else
			tp->tun_flags &= ~TUN_DSTADDR;
		break;
	}
#endif /* INET6 */
	default:
		break;
	}
	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	mutex_exit(&tp->tun_lock);
}

/*
 * Process an ioctl request.
 */
static int
tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		tun_enable(tp, ifa);
		ifa->ifa_rtrequest = p2p_rtrequest;
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	return error;
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
static int
tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
#if defined(INET) || defined(INET6)
	int mlen;
	uint32_t *af;
#endif

	mutex_enter(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		error = EHOSTDOWN;
		mutex_exit(&tp->tun_lock);
		goto out;
	}
	// XXXrmind
	mutex_exit(&tp->tun_lock);

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);

	bpf_mtap_af(ifp, dst->sa_family, m0, BPF_D_OUT);

	if ((error = pfil_run_hooks(ifp->if_pfil, &m0, ifp, PFIL_OUT)) != 0)
		goto out;
	if (m0 == NULL)
		goto out;

	switch(dst->sa_family) {
#ifdef INET6
	case AF_INET6:
#endif
#ifdef INET
	case AF_INET:
#endif
#if defined(INET) || defined(INET6)
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			memcpy(mtod(m0, char *), dst, dst->sa_len);
		}

		if (tp->tun_flags & TUN_IFHEAD) {
			/* Prepend the address family */
			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			af = mtod(m0,uint32_t *);
			*af = htonl(dst->sa_family);
		} else {
#ifdef INET
			if (dst->sa_family != AF_INET)
#endif
			{
				error = EAFNOSUPPORT;
				goto out;
			}
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		IFQ_ENQUEUE(&ifp->if_snd, m0, error);
		if (error) {
			ifp->if_collisions++;
			error = EAFNOSUPPORT;
			m0 = NULL;
			goto out;
		}
		mlen = m0->m_pkthdr.len;
		ifp->if_opackets++;
		ifp->if_obytes += mlen;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out;
	}

	mutex_enter(&tp->tun_lock);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		softint_schedule(tp->tun_isih);

	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	mutex_exit(&tp->tun_lock);
out:
	if (error && m0)
		m_freem(m0);

	return error;
}

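/*
 * Soft interrupts used to post SIGIO to the controlling process/group
 * for readable (input) and writable (output) events; they are scheduled
 * from the output/start paths so that fownsignal() runs without the
 * softc lock held.
 */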
static void
tun_i_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
		    NULL);
}

static void
tun_o_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
		    NULL);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct tun_softc *tp;
	int error = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			if (tp->tun_if.if_flags & IFF_UP) {
				error = EBUSY;
				goto out;
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data) {
			tp->tun_flags |= TUN_PREPADDR;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case TUNSIFHEAD:
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_PREPADDR;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;

	case TUNGIFHEAD:
		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(&tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(tp->tun_pgid, cmd, data);
		break;

	default:
		error = ENOTTY;
	}

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error = 0, len;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				error = EWOULDBLOCK;
				goto out;
			}
			tp->tun_flags |= TUN_RWAIT;
			if (cv_wait_sig(&tp->tun_cv, &tp->tun_lock)) {
				error = EINTR;
				goto out;
			}
		}
	} while (m0 == 0);

	mutex_exit(&tp->tun_lock);

	/* Copy the mbuf chain */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = uimin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, void *), len, uio);
		m0 = m = m_free(m0);
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;

	return error;

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
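/*
 * The framing of each write depends on the mode selected with
 * TUNSLMODE/TUNSIFHEAD: with TUN_PREPADDR the packet is preceded by a
 * struct sockaddr naming the destination, with TUN_IFHEAD by a 32-bit
 * address family in network byte order, and otherwise the data is taken
 * to be a raw IPv4 packet.  A userland sketch of the TUN_IFHEAD case
 * (illustrative only):
 *
 *	uint32_t af = htonl(AF_INET6);
 *	struct iovec iov[2] = {
 *		{ &af, sizeof(af) },
 *		{ pkt, pktlen },
 *	};
 *	(void)writev(fd, iov, 2);	// one packet per write
 */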
int
tunwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	pktqueue_t *pktq;
	struct sockaddr dst;
	int error = 0, tlen, mlen;
	uint32_t family;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return ENXIO;
	}

	/* Unlock until we've got the data */
	mutex_exit(&tp->tun_lock);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			int n = dst.sa_len - sizeof(dst);
			while (n--) {
				char discard;
				error = uiomove(&discard, 1, uio);
				if (error) {
					goto out0;
				}
			}
		}
	} else if (tp->tun_flags & TUN_IFHEAD) {
		if (uio->uio_resid < sizeof(family)){
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&family, sizeof(family), uio);
		dst.sa_family = ntohl(family);
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		error = EIO;
		goto out0;
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		pktq = ip6_pktq;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out0;
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		return ENOBUFS;
	}
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = uimin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, void *), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top != NULL)
			m_freem(top);
		ifp->if_ierrors++;
		goto out0;
	}

	top->m_pkthdr.len = tlen;
	m_set_rcvif(top, ifp);

	bpf_mtap_af(ifp, dst.sa_family, top, BPF_D_IN);

	if ((error = pfil_run_hooks(ifp->if_pfil, &top, ifp, PFIL_IN)) != 0)
		goto out0;
	if (top == NULL)
		goto out0;

	mutex_enter(&tp->tun_lock);
	if ((tp->tun_flags & TUN_INITED) == 0) {
		/* Interface was destroyed */
		error = ENXIO;
		goto out;
	}
	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
		ifp->if_collisions++;
		mutex_exit(&tp->tun_lock);
		error = ENOBUFS;
		m_freem(top);
		goto out0;
	}
	ifp->if_ipackets++;
	ifp->if_ibytes += tlen;
out:
	mutex_exit(&tp->tun_lock);
out0:
	return error;
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.
 * when the interface queue is rate-limited by ALTQ or TBR,
 * if_start is needed to drain packets from the queue in order
 * to notify readers when outgoing packets become ready.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	mutex_enter(&tp->tun_lock);
	if (!IF_IS_EMPTY(&ifp->if_snd)) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			cv_broadcast(&tp->tun_cv);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			softint_schedule(tp->tun_osih);

		selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
	}
	mutex_exit(&tp->tun_lock);
}
#endif /* ALTQ */
/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
int
tunpoll(dev_t dev, int events, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int revents = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return 0;
	}
	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(l, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	mutex_exit(&tp->tun_lock);

	return revents;
}

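/*
 * kqueue detach: remove the knote from the selinfo klist it was
 * attached to.
 */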
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;

	mutex_enter(&tp->tun_lock);
	SLIST_REMOVE(&tp->tun_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&tp->tun_lock);
}

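/*
 * kqueue read filter: the descriptor is readable when the interface
 * send queue is non-empty; kn_data is set to the length of the packet
 * at its head.
 */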
static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int ready;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_enter(&tp->tun_lock);

	IF_POLL(&ifp->if_snd, m);
	ready = (m != NULL);
	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_exit(&tp->tun_lock);

	return ready;
}

static const struct filterops tunread_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_tunrdetach,
	.f_event = filt_tunread,
};

static const struct filterops tun_seltrue_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_tunrdetach,
	.f_event = filt_seltrue,
};

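/*
 * The cdevsw kqfilter interface: register read events (packets queued
 * for the user process) or always-true write events on the device.
 */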
int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	struct klist *klist;
	int rv = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tunread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &tp->tun_rsel.sel_klist;
		kn->kn_fop = &tun_seltrue_filtops;
		break;

	default:
		rv = EINVAL;
		goto out;
	}

	kn->kn_hook = tp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	return rv;
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, tun, "")
