/*	$NetBSD: if_tun.c,v 1.164 2021/09/26 15:58:33 thorpej Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo (at) cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */

/*
 * tun - tunnel software network interface.
 */
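
/*
 * Illustrative userland usage (a minimal sketch, not part of this
 * driver; the device path and surrounding code are assumptions, only
 * the ioctl names below come from this file):
 *
 *	int fd = open("/dev/tun0", O_RDWR);	// opening also creates tun0
 *	int one = 1;
 *	ioctl(fd, TUNSIFHEAD, &one);	// 4-byte address family header
 *	// Configure addresses with ifconfig(8); then each read(2) returns
 *	// one queued packet and each write(2) injects one packet.
 */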

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tun.c,v 1.164 2021/09/26 15:58:33 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/if_tun.h>

#include "ioconf.h"

#define	TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

extern int ifqmaxlen;

static LIST_HEAD(, tun_softc) tun_softc_list;
static LIST_HEAD(, tun_softc) tunz_softc_list;
static kmutex_t tun_softc_lock;

static int	tun_ioctl(struct ifnet *, u_long, void *);
static int	tun_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, const struct rtentry *rt);
static int	tun_clone_create(struct if_clone *, int);
static int	tun_clone_destroy(struct ifnet *);

static struct if_clone tun_cloner =
    IF_CLONE_INITIALIZER("tun", tun_clone_create, tun_clone_destroy);

static void	tunattach0(struct tun_softc *);
static void	tun_enable(struct tun_softc *, const struct ifaddr *);
static void	tun_i_softintr(void *);
static void	tun_o_softintr(void *);
#ifdef ALTQ
static void	tunstart(struct ifnet *);
#endif
static struct tun_softc *tun_find_unit(dev_t);
static struct tun_softc *tun_find_zunit(int);

static dev_type_open(tunopen);
static dev_type_close(tunclose);
static dev_type_read(tunread);
static dev_type_write(tunwrite);
static dev_type_ioctl(tunioctl);
static dev_type_poll(tunpoll);
static dev_type_kqfilter(tunkqfilter);

const struct cdevsw tun_cdevsw = {
	.d_open = tunopen,
	.d_close = tunclose,
	.d_read = tunread,
	.d_write = tunwrite,
	.d_ioctl = tunioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = tunpoll,
	.d_mmap = nommap,
	.d_kqfilter = tunkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

#ifdef _MODULE
devmajor_t tun_bmajor = -1, tun_cmajor = -1;
#endif

void
tunattach(int unused)
{

	/*
	 * Nothing to do here; initialization is handled by the
	 * module initialization code in tuninit() below.
	 */
}

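/*
 * Module/driver initialization: set up the softc lists and their lock,
 * attach the cloner and, for modular kernels, register the devsw.
 */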
static void
tuninit(void)
{

	mutex_init(&tun_softc_lock, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
#ifdef _MODULE
	devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
}

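/*
 * Tear down what tuninit() set up.  Fails with EBUSY, undoing the
 * detach, if any instance (live or zombie) still exists.
 */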
static int
tundetach(void)
{
#ifdef _MODULE
	int error;
#endif

	if_clone_detach(&tun_cloner);
#ifdef _MODULE
	error = devsw_detach(NULL, &tun_cdevsw);
	if (error != 0) {
		if_clone_attach(&tun_cloner);
		return error;
	}
#endif

	if (!LIST_EMPTY(&tun_softc_list) || !LIST_EMPTY(&tunz_softc_list)) {
#ifdef _MODULE
		devsw_attach("tun", NULL, &tun_bmajor, &tun_cdevsw, &tun_cmajor);
#endif
		if_clone_attach(&tun_cloner);
		return EBUSY;
	}

	mutex_destroy(&tun_softc_lock);

	return 0;
}

/*
 * Find driver instance from dev_t.
 * Returns with tp locked (if found).
 */
static struct tun_softc *
tun_find_unit(dev_t dev)
{
	struct tun_softc *tp;
	int unit = minor(dev);

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tun_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		mutex_enter(&tp->tun_lock);
	mutex_exit(&tun_softc_lock);

	return tp;
}

/*
 * Find zombie driver instance by unit number.
 * Remove tp from list and return it unlocked (if found).
 */
static struct tun_softc *
tun_find_zunit(int unit)
{
	struct tun_softc *tp;

	mutex_enter(&tun_softc_lock);
	LIST_FOREACH(tp, &tunz_softc_list, tun_list)
		if (unit == tp->tun_unit)
			break;
	if (tp)
		LIST_REMOVE(tp, tun_list);
	mutex_exit(&tun_softc_lock);
	KASSERTMSG(!tp || (tp->tun_flags & (TUN_INITED|TUN_OPEN)) == TUN_OPEN,
	    "tun%d: inconsistent flags: %x", unit, tp->tun_flags);

	return tp;
}

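/*
 * Create a tun instance for "unit", or revive a zombie left behind by
 * a destroy-while-open, and put it on the active list.
 */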
static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		tp = kmem_zalloc(sizeof(*tp), KM_SLEEP);

		tp->tun_unit = unit;
		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
		cv_init(&tp->tun_cv, "tunread");
		selinit(&tp->tun_rsel);
		selinit(&tp->tun_wsel);
	} else {
		/* Revive tunnel instance; clear ifp part */
		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
	}

	if_initname(&tp->tun_if, ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;
	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);

	mutex_enter(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	mutex_exit(&tun_softc_lock);

	return 0;
}

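/*
 * Common ifnet setup shared by freshly created and revived instances.
 */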
static void
tunattach0(struct tun_softc *tp)
{
	struct ifnet *ifp;

	ifp = &tp->tun_if;
	ifp->if_softc = tp;
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tun_ioctl;
	ifp->if_output = tun_output;
#ifdef ALTQ
	ifp->if_start = tunstart;
#endif
	ifp->if_flags = IFF_POINTOPOINT;
	ifp->if_type = IFT_TUNNEL;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_dlt = DLT_NULL;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ifp->if_link_state = LINK_STATE_DOWN;
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_NULL, sizeof(uint32_t));
}

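/*
 * Destroy a tun instance.  If the character device is still open, keep
 * the softc on the zombie list until the last close.
 */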
static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	bool zombie = false;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close. */
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
		zombie = true;
	}
	mutex_exit(&tun_softc_lock);

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	mutex_exit(&tp->tun_lock);

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		cv_destroy(&tp->tun_cv);
		kmem_free(tp, sizeof(*tp));
	}

	return 0;
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
static int
tunopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct ifnet *ifp;
	struct tun_softc *tp;
	int error;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE_TUN,
	    KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD, NULL, NULL, NULL);
	if (error)
		return error;

	tp = tun_find_unit(dev);

	if (tp == NULL) {
		(void)tun_clone_create(&tun_cloner, minor(dev));
		tp = tun_find_unit(dev);
		if (tp == NULL) {
			return ENXIO;
		}
	}

	if (tp->tun_flags & TUN_OPEN) {
		mutex_exit(&tp->tun_lock);
		return EBUSY;
	}

	ifp = &tp->tun_if;
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
	if_link_state_change(ifp, LINK_STATE_UP);

	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;

	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		cv_destroy(&tp->tun_cv);	/* match tun_clone_destroy() */
		kmem_free(tp, sizeof(*tp));
		return 0;
	}

	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;
			IFADDR_READER_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR ?
					    RTF_HOST : 0);
				}
#endif
			}
		}
	}

	if_link_state_change(ifp, LINK_STATE_DOWN);

out_nolock:
	return 0;
}

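/*
 * Record whether a usable local (and, for point-to-point links, a
 * destination) address is configured, then mark the interface up and
 * running.
 */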
static void
tun_enable(struct tun_softc *tp, const struct ifaddr *ifa)
{
	struct ifnet *ifp = &tp->tun_if;

	TUNDEBUG("%s: %s\n", __func__, ifp->if_xname);

	mutex_enter(&tp->tun_lock);
	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);

	switch (ifa->ifa_addr->sa_family) {
#ifdef INET
	case AF_INET: {
		struct sockaddr_in *sin;

		sin = satosin(ifa->ifa_addr);
		if (sin && sin->sin_addr.s_addr)
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin(ifa->ifa_dstaddr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_DSTADDR;
		}
		break;
	}
#endif
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *sin;

		sin = satosin6(ifa->ifa_addr);
		if (!IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
			tp->tun_flags |= TUN_IASET;

		if (ifp->if_flags & IFF_POINTOPOINT) {
			sin = satosin6(ifa->ifa_dstaddr);
			if (sin && !IN6_IS_ADDR_UNSPECIFIED(&sin->sin6_addr))
				tp->tun_flags |= TUN_DSTADDR;
		} else
			tp->tun_flags &= ~TUN_DSTADDR;
		break;
	}
#endif /* INET6 */
	default:
		break;
	}
	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	mutex_exit(&tp->tun_lock);
}

/*
 * Process an ioctl request.
 */
static int
tun_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tun_softc *tp = (struct tun_softc *)(ifp->if_softc);
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		tun_enable(tp, ifa);
		ifa->ifa_rtrequest = p2p_rtrequest;
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	return error;
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
static int
tun_output(struct ifnet *ifp, struct mbuf *m0, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	int error;
#if defined(INET) || defined(INET6)
	int mlen;
	uint32_t *af;
#endif

	mutex_enter(&tp->tun_lock);
	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		error = EHOSTDOWN;
		mutex_exit(&tp->tun_lock);
		goto out;
	}
	// XXXrmind
	mutex_exit(&tp->tun_lock);

	/*
	 * if the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m0, dst->sa_family);

	bpf_mtap_af(ifp, dst->sa_family, m0, BPF_D_OUT);

	if ((error = pfil_run_hooks(ifp->if_pfil, &m0, ifp, PFIL_OUT)) != 0)
		goto out;
	if (m0 == NULL)
		goto out;

	switch (dst->sa_family) {
#ifdef INET6
	case AF_INET6:
#endif
#ifdef INET
	case AF_INET:
#endif
#if defined(INET) || defined(INET6)
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			memcpy(mtod(m0, char *), dst, dst->sa_len);
		}

		if (tp->tun_flags & TUN_IFHEAD) {
			/* Prepend the address family */
			M_PREPEND(m0, sizeof(*af), M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				error = ENOBUFS;
				goto out;
			}
			af = mtod(m0, uint32_t *);
			*af = htonl(dst->sa_family);
		} else {
#ifdef INET
			if (dst->sa_family != AF_INET)
#endif
			{
				error = EAFNOSUPPORT;
				goto out;
			}
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		mlen = m0->m_pkthdr.len;
		IFQ_ENQUEUE(&ifp->if_snd, m0, error);
		if (error) {
			if_statinc(ifp, if_collisions);
			error = EAFNOSUPPORT;
			m0 = NULL;
			goto out;
		}
		if_statadd2(ifp, if_opackets, 1, if_obytes, mlen);
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out;
	}

	mutex_enter(&tp->tun_lock);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		cv_broadcast(&tp->tun_cv);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		softint_schedule(tp->tun_isih);

	selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);

	mutex_exit(&tp->tun_lock);
out:
	if (error && m0)
		m_freem(m0);

	return error;
}

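/*
 * Soft interrupt: deliver SIGIO for readable data to the configured
 * process or process group when FIOASYNC has been set.
 */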
static void
tun_i_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_IN, POLLIN|POLLRDNORM,
		    NULL);
}

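/*
 * Soft interrupt: deliver SIGIO for output-possible to the configured
 * process or process group when FIOASYNC has been set.
 */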
static void
tun_o_softintr(void *cookie)
{
	struct tun_softc *tp = cookie;

	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_OUT, POLLOUT|POLLWRNORM,
		    NULL);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct tun_softc *tp;
	int error = 0;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			if (tp->tun_if.if_flags & IFF_UP) {
				error = EBUSY;
				goto out;
			}
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data) {
			tp->tun_flags |= TUN_PREPADDR;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case TUNSIFHEAD:
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_PREPADDR;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;

	case TUNGIFHEAD:
		*(int *)data = (tp->tun_flags & TUN_IFHEAD);
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

	case FIONREAD:
		if (tp->tun_if.if_snd.ifq_head)
			*(int *)data = tp->tun_if.if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		break;

	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(&tp->tun_pgid, cmd, data);
		break;

	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(tp->tun_pgid, cmd, data);
		break;

	default:
		error = ENOTTY;
	}

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int error = 0, len;

	tp = tun_find_unit(dev);

	/* interface was "destroyed" already */
	if (tp == NULL) {
		return ENXIO;
	}

	ifp = &tp->tun_if;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		error = EHOSTDOWN;
		goto out;
	}

	tp->tun_flags &= ~TUN_RWAIT;

	do {
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL) {
			if (tp->tun_flags & TUN_NBIO) {
				error = EWOULDBLOCK;
				goto out;
			}
			tp->tun_flags |= TUN_RWAIT;
			if (cv_wait_sig(&tp->tun_cv, &tp->tun_lock)) {
				error = EINTR;
				goto out;
			}
		}
	} while (m0 == NULL);

	mutex_exit(&tp->tun_lock);

	/* Copy the mbuf chain */
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = uimin(uio->uio_resid, m0->m_len);
		if (len != 0)
			error = uiomove(mtod(m0, void *), len, uio);
		m0 = m = m_free(m0);
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		if_statinc(ifp, if_ierrors);

	return error;

out:
	mutex_exit(&tp->tun_lock);

	return error;
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
int
tunwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	struct mbuf *top, **mp, *m;
	pktqueue_t *pktq;
	struct sockaddr dst;
	int error = 0, tlen, mlen;
	uint32_t family;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return ENXIO;
	}

	/* Unlock until we've got the data */
	mutex_exit(&tp->tun_lock);

	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&dst, sizeof(dst), uio);
		if (dst.sa_len > sizeof(dst)) {
			/* Duh.. */
			int n = dst.sa_len - sizeof(dst);
			while (n--) {
				char discard;
				error = uiomove(&discard, 1, uio);
				if (error) {
					goto out0;
				}
			}
		}
	} else if (tp->tun_flags & TUN_IFHEAD) {
		if (uio->uio_resid < sizeof(family)) {
			error = EIO;
			goto out0;
		}
		error = uiomove((void *)&family, sizeof(family), uio);
		dst.sa_family = ntohl(family);
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid == 0 || uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%lu!\n", ifp->if_xname,
		    (unsigned long)uio->uio_resid);
		error = EIO;
		goto out0;
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		pktq = ip6_pktq;
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		goto out0;
	}

	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		return ENOBUFS;
	}
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = uimin(mlen, uio->uio_resid);
		error = uiomove(mtod(m, void *), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (error == 0 && uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top != NULL)
			m_freem(top);
		if_statinc(ifp, if_ierrors);
		goto out0;
	}

	top->m_pkthdr.len = tlen;
	m_set_rcvif(top, ifp);

	bpf_mtap_af(ifp, dst.sa_family, top, BPF_D_IN);

	if ((error = pfil_run_hooks(ifp->if_pfil, &top, ifp, PFIL_IN)) != 0)
		goto out0;
	if (top == NULL)
		goto out0;

	mutex_enter(&tp->tun_lock);
	if ((tp->tun_flags & TUN_INITED) == 0) {
		/* Interface was destroyed */
		error = ENXIO;
		goto out;
	}
	if (__predict_false(!pktq_enqueue(pktq, top, 0))) {
		if_statinc(ifp, if_collisions);
		mutex_exit(&tp->tun_lock);
		error = ENOBUFS;
		m_freem(top);
		goto out0;
	}
	if_statadd2(ifp, if_ipackets, 1, if_ibytes, tlen);
out:
	mutex_exit(&tp->tun_lock);
out0:
	return error;
}

#ifdef ALTQ
/*
 * Start packet transmission on the interface.  When the interface
 * queue is rate-limited by ALTQ or TBR, if_start is needed to drain
 * packets from the queue in order to notify readers when outgoing
 * packets become ready.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (!ALTQ_IS_ENABLED(&ifp->if_snd) && !TBR_IS_ENABLED(&ifp->if_snd))
		return;

	mutex_enter(&tp->tun_lock);
	if (!IF_IS_EMPTY(&ifp->if_snd)) {
		if (tp->tun_flags & TUN_RWAIT) {
			tp->tun_flags &= ~TUN_RWAIT;
			cv_broadcast(&tp->tun_cv);
		}
		if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
			softint_schedule(tp->tun_osih);

		selnotify(&tp->tun_rsel, 0, NOTE_SUBMIT);
	}
	mutex_exit(&tp->tun_lock);
}
#endif /* ALTQ */

/*
 * tunpoll - the poll interface.  This is really only useful for reads;
 * the write side always appears ready since a write never blocks: the
 * packet is either accepted or dropped.
 */
int
tunpoll(dev_t dev, int events, struct lwp *l)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int revents = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL) {
		/* Interface was "destroyed" already. */
		return 0;
	}
	ifp = &tp->tun_if;

	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(l, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	mutex_exit(&tp->tun_lock);

	return revents;
}

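/*
 * Remove an EVFILT_READ knote from the read selinfo.
 */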
static void
filt_tunrdetach(struct knote *kn)
{
	struct tun_softc *tp = kn->kn_hook;

	mutex_enter(&tp->tun_lock);
	selremove_knote(&tp->tun_rsel, kn);
	mutex_exit(&tp->tun_lock);
}

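/*
 * EVFILT_READ event: ready when the send queue is non-empty; kn_data
 * is the byte count of the packet at the head of the queue.
 */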
static int
filt_tunread(struct knote *kn, long hint)
{
	struct tun_softc *tp = kn->kn_hook;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int ready;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_enter(&tp->tun_lock);

	IF_POLL(&ifp->if_snd, m);
	ready = (m != NULL);
	for (kn->kn_data = 0; m != NULL; m = m->m_next)
		kn->kn_data += m->m_len;

	if (hint & NOTE_SUBMIT)
		KASSERT(mutex_owned(&tp->tun_lock));
	else
		mutex_exit(&tp->tun_lock);

	return ready;
}

static const struct filterops tunread_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_tunrdetach,
	.f_event = filt_tunread,
};

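/*
 * The cdevsw kqfilter interface: reads hook into the send queue via
 * tunread_filtops, writes always appear ready (seltrue_filtops).
 */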
int
tunkqfilter(dev_t dev, struct knote *kn)
{
	struct tun_softc *tp;
	int rv = 0;

	tp = tun_find_unit(dev);
	if (tp == NULL)
		goto out_nolock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tunread_filtops;
		kn->kn_hook = tp;
		selrecord_knote(&tp->tun_rsel, kn);
		break;

	case EVFILT_WRITE:
		kn->kn_fop = &seltrue_filtops;
		break;

	default:
		rv = EINVAL;
		goto out;
	}

out:
	mutex_exit(&tp->tun_lock);
out_nolock:
	return rv;
}

/*
 * Module infrastructure
 */
#include "if_module.h"

IF_MODULE(MODULE_CLASS_DRIVER, tun, NULL)