/*	$NetBSD: if_tun.c,v 1.36.2.1 1998/12/11 04:53:06 kenh Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo (at) cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 */
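
/*
 * Rough usage sketch: a user process opens /dev/tunN and the matching
 * tunN interface is configured and marked up (for a point-to-point
 * link, something like "ifconfig tun0 <local> <remote> up").  Each
 * read() on the device then returns one packet queued for output on
 * tunN, and each write() of a complete packet injects it into the
 * protocol input queue as if it had been received on tunN.
 */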

#include "tun.h"
#if NTUN > 0

#include "opt_inet.h"
#include "opt_ns.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/signalvar.h>
#include <sys/conf.h>

#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/netisr.h>
#include <net/route.h>


#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <sys/time.h>
#include <net/bpf.h>
#endif

#include <net/if_tun.h>

#define TUNDEBUG	if (tundebug) printf
int	tundebug = 0;

struct tun_softc tunctl[NTUN];
extern int ifqmaxlen;
void	tunattach __P((int));

int	tun_ioctl __P((struct ifnet *, u_long, caddr_t));
int	tun_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *rt));

static void tuninit __P((struct tun_softc *));

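/*
 * tunattach - called at boot; set up one softc and one point-to-point
 * interface per configured tun unit and attach each to the interface
 * list (and to bpf, when configured).
 */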
void
tunattach(unused)
	int unused;
{
	register int i;
	struct ifnet *ifp;

	for (i = 0; i < NTUN; i++) {
		tunctl[i].tun_flags = TUN_INITED;

#ifdef _HAS_IF_ALLOC
		ifp = if_alloc();
		tunctl[i].tun_if = ifp;
#else
		ifp = &tunctl[i].tun_if;
#endif
		sprintf(ifp->if_xname, "tun%d", i);
		ifp->if_softc = &tunctl[i];
		ifp->if_mtu = TUNMTU;
		ifp->if_ioctl = tun_ioctl;
		ifp->if_output = tun_output;
		ifp->if_flags = IFF_POINTOPOINT;
		ifp->if_snd.ifq_maxlen = ifqmaxlen;
		ifp->if_collisions = 0;
		ifp->if_ierrors = 0;
		ifp->if_oerrors = 0;
		ifp->if_ipackets = 0;
		ifp->if_opackets = 0;
		if_attach(ifp);
#if NBPFILTER > 0
		bpfattach(&tunctl[i].tun_bpf, ifp, DLT_NULL, sizeof(u_int32_t));
#endif
	}
}

/*
 * tunnel open - must be superuser & the device must be
 * configured in
 */
int
tunopen(dev, flag, mode, p)
	dev_t	dev;
	int	flag, mode;
	struct proc	*p;
{
	struct ifnet	*ifp;
	struct tun_softc *tp;
	register int	unit, error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((unit = minor(dev)) >= NTUN)
		return (ENXIO);
	tp = &tunctl[unit];
	if (tp->tun_flags & TUN_OPEN)
		return ENXIO;
#ifdef _HAS_IF_ALLOC
	ifp = tp->tun_if;
#else
	ifp = &tp->tun_if;
#endif
	tp->tun_flags |= TUN_OPEN;
	TUNDEBUG("%s: open\n", ifp->if_xname);
	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
int
tunclose(dev, flag, mode, p)
	dev_t	dev;
	int	flag;
	int	mode;
	struct proc *p;
{
	register int	unit = minor(dev), s;
	struct tun_softc *tp = &tunctl[unit];
#ifdef _HAS_IF_ALLOC
	struct ifnet	*ifp = tp->tun_if;
#else
	struct ifnet	*ifp = &tp->tun_if;
#endif
	struct mbuf	*m;

	tp->tun_flags &= ~TUN_OPEN;

	/*
	 * junk all pending output
	 */
	do {
		s = splimp();
		IF_DEQUEUE(&ifp->if_snd, m);
		splx(s);
		if (m)
			m_freem(m);
	} while (m);

	if (ifp->if_flags & IFF_UP) {
		s = splimp();
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			register struct ifaddr *ifa;
			for (ifa = ifp->if_addrlist.tqh_first; ifa != 0;
			    ifa = ifa->ifa_list.tqe_next) {
				if (ifa->ifa_addr->sa_family == AF_INET) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST
					    : 0);
				}
			}
		}
		splx(s);
	}
	tp->tun_pgrp = 0;
	selwakeup(&tp->tun_rsel);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	return (0);
}

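/*
 * tuninit - mark the interface up and running and note in the softc
 * whether a local internet address (TUN_IASET) and, on point-to-point
 * interfaces, a destination address (TUN_DSTADDR) have been set.
 */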
static void
tuninit(tp)
	struct tun_softc *tp;
{
#ifdef _HAS_IF_ALLOC
	struct ifnet	*ifp = tp->tun_if;
#else
	struct ifnet	*ifp = &tp->tun_if;
#endif
	register struct ifaddr *ifa;

	TUNDEBUG("%s: tuninit\n", ifp->if_xname);

	ifp->if_flags |= IFF_UP | IFF_RUNNING;

	tp->tun_flags &= ~(TUN_IASET|TUN_DSTADDR);
	for (ifa = ifp->if_addrlist.tqh_first; ifa != 0;
	    ifa = ifa->ifa_list.tqe_next) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = satosin(ifa->ifa_addr);
			if (sin && sin->sin_addr.s_addr)
				tp->tun_flags |= TUN_IASET;

			if (ifp->if_flags & IFF_POINTOPOINT) {
				sin = satosin(ifa->ifa_dstaddr);
				if (sin && sin->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
			}
		}
	}

	return;
}

/*
 * Process an ioctl request.
 */
int
tun_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long	cmd;
	caddr_t	data;
{
	int	error = 0, s;

	s = splimp();
	switch(cmd) {
	case SIOCSIFADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: address set\n", ifp->if_xname);
		break;
	case SIOCSIFDSTADDR:
		tuninit((struct tun_softc *)(ifp->if_softc));
		TUNDEBUG("%s: destination address set\n", ifp->if_xname);
		break;
	case SIOCSIFBRDADDR:
		TUNDEBUG("%s: broadcast address set\n", ifp->if_xname);
		break;
	case SIOCSIFMTU: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr->ifr_mtu > TUNMTU || ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		TUNDEBUG("%s: interface mtu set\n", ifp->if_xname);
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	}
	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		struct ifreq *ifr = (struct ifreq *) data;
		if (ifr == 0) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {

#ifdef INET
		case AF_INET:
			break;
#endif

		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;
	}
	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}

/*
 * tun_output - queue packets from higher level ready to put out.
 */
int
tun_output(ifp, m0, dst, rt)
	struct ifnet *ifp;
	struct mbuf *m0;
	struct sockaddr *dst;
	struct rtentry *rt;
{
	struct tun_softc *tp = ifp->if_softc;
	struct proc	*p;
	int		s;

	TUNDEBUG ("%s: tun_output\n", ifp->if_xname);

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname,
		    tp->tun_flags);
		m_freem (m0);
		return (EHOSTDOWN);
	}

#if NBPFILTER > 0
	if (tp->tun_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field. Cons up a dummy header
		 * to pacify bpf. This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = dst->sa_family;

		m.m_next = m0;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(tp->tun_bpf, &m);
	}
#endif

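	/*
	 * Queue the packet on the interface send queue; tunread() will
	 * hand it to the user process.  With TUNSLMODE the destination
	 * sockaddr is prepended so the reader can see where the packet
	 * was headed.
	 */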
	switch(dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (tp->tun_flags & TUN_PREPADDR) {
			/* Simple link-layer header */
			M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
			if (m0 == NULL) {
				IF_DROP(&ifp->if_snd);
				return (ENOBUFS);
			}
			bcopy(dst, mtod(m0, char *), dst->sa_len);
		}
		/* FALLTHROUGH */
	case AF_UNSPEC:
		s = splimp();
		if (IF_QFULL(&ifp->if_snd)) {
			IF_DROP(&ifp->if_snd);
			m_freem(m0);
			splx(s);
			ifp->if_collisions++;
			return (ENOBUFS);
		}
		IF_ENQUEUE(&ifp->if_snd, m0);
		splx(s);
		ifp->if_opackets++;
		break;
#endif
	default:
		m_freem(m0);
		return (EAFNOSUPPORT);
	}

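	/*
	 * A packet is now queued: wake any reader sleeping in tunread(),
	 * post SIGIO if async I/O was requested, and notify select/poll.
	 */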
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((caddr_t)tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgrp) {
		if (tp->tun_pgrp > 0)
			gsignal(tp->tun_pgrp, SIGIO);
		else if ((p = pfind(-tp->tun_pgrp)) != NULL)
			psignal(p, SIGIO);
	}
	selwakeup(&tp->tun_rsel);
	return (0);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
int
tunioctl(dev, cmd, data, flag, p)
	dev_t	dev;
	u_long	cmd;
	caddr_t	data;
	int	flag;
	struct proc *p;
{
	int	unit = minor(dev), s;
	struct tun_softc *tp = &tunctl[unit];
#ifdef _HAS_IF_ALLOC
	struct ifnet *ifp = tp->tun_if;
#else
	struct ifnet *ifp = &tp->tun_if;
#endif

	switch (cmd) {
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;

	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;

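	/*
	 * TUNSIFMODE switches the interface between point-to-point and
	 * broadcast operation; it is refused with EBUSY while the
	 * interface is up.
	 */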
	case TUNSIFMODE:
		switch (*(int *)data & (IFF_POINTOPOINT|IFF_BROADCAST)) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			s = splimp();
			if (ifp->if_flags & IFF_UP) {
				splx(s);
				return (EBUSY);
			}
			ifp->if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			ifp->if_flags |= *(int *)data;
			splx(s);
			break;
		default:
			return (EINVAL);
			break;
		}
		break;

	case TUNSLMODE:
		if (*(int *)data)
			tp->tun_flags |= TUN_PREPADDR;
		else
			tp->tun_flags &= ~TUN_PREPADDR;
		break;

	case FIONBIO:
		if (*(int *)data)
			tp->tun_flags |= TUN_NBIO;
		else
			tp->tun_flags &= ~TUN_NBIO;
		break;

	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;

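	/*
	 * FIONREAD reports the length of the first queued packet only,
	 * i.e. how much the next read() will return, not the total
	 * number of bytes queued.
	 */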
	case FIONREAD:
		s = splimp();
		if (ifp->if_snd.ifq_head)
			*(int *)data = ifp->if_snd.ifq_head->m_pkthdr.len;
		else
			*(int *)data = 0;
		splx(s);
		break;

	case TIOCSPGRP:
		tp->tun_pgrp = *(int *)data;
		break;

	case TIOCGPGRP:
		*(int *)data = tp->tun_pgrp;
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
int
tunread(dev, uio, ioflag)
	dev_t		dev;
	struct uio	*uio;
	int		ioflag;
{
	int		unit = minor(dev);
	struct tun_softc *tp = &tunctl[unit];
#ifdef _HAS_IF_ALLOC
	struct ifnet	*ifp = tp->tun_if;
#else
	struct ifnet	*ifp = &tp->tun_if;
#endif
	struct mbuf	*m, *m0;
	int		error=0, len, s;

	TUNDEBUG ("%s: read\n", ifp->if_xname);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG ("%s: not ready 0%o\n", ifp->if_xname, tp->tun_flags);
		return EHOSTDOWN;
	}

	tp->tun_flags &= ~TUN_RWAIT;

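	/*
	 * Dequeue one packet, or sleep until tun_output() queues one;
	 * with non-blocking I/O selected, fail with EWOULDBLOCK instead.
	 */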
	s = splimp();
	do {
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0) {
			if (tp->tun_flags & TUN_NBIO) {
				splx(s);
				return (EWOULDBLOCK);
			}
			tp->tun_flags |= TUN_RWAIT;
			if (tsleep((caddr_t)tp, PZERO|PCATCH, "tunread", 0)) {
				splx(s);
				return (EINTR);
			}
		}
	} while (m0 == 0);
	splx(s);

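	/*
	 * Copy the packet out one mbuf at a time; anything that does
	 * not fit in the caller's buffer is dropped.
	 */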
	while (m0 && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m0->m_len);
		if (len == 0)
			break;
		error = uiomove(mtod(m0, caddr_t), len, uio);
		MFREE(m0, m);
		m0 = m;
	}

	if (m0) {
		TUNDEBUG("Dropping mbuf\n");
		m_freem(m0);
	}
	if (error)
		ifp->if_ierrors++;
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
int
tunwrite(dev, uio, ioflag)
	dev_t		dev;
	struct uio	*uio;
	int		ioflag;
{
	int		unit = minor (dev);
	struct tun_softc *tp = &tunctl[unit];
#ifdef _HAS_IF_ALLOC
	struct ifnet	*ifp = tp->tun_if;
#else
	struct ifnet	*ifp = &tp->tun_if;
#endif
	struct mbuf	*top, **mp, *m;
	struct ifqueue	*ifq;
	struct sockaddr	dst;
	int		isr, error=0, s, tlen, mlen;

	TUNDEBUG("%s: tunwrite\n", ifp->if_xname);

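	/*
	 * In TUNSLMODE (TUN_PREPADDR) each packet written must be
	 * preceded by a struct sockaddr naming the destination address
	 * family; otherwise the packet is assumed to be AF_INET.
	 */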
	if (tp->tun_flags & TUN_PREPADDR) {
		if (uio->uio_resid < sizeof(dst))
			return (EIO);
		error = uiomove((caddr_t)&dst, sizeof(dst), uio);
		if (error)
			return (error);
		if (dst.sa_len > sizeof(dst)) {
			/* sockaddr is longer than we keep; skip the excess */
			char discard;
			int n = dst.sa_len - sizeof(dst);
			while (n--)
				if ((error = uiomove(&discard, 1, uio)) != 0)
					return (error);
		}
	} else {
#ifdef INET
		dst.sa_family = AF_INET;
#endif
	}

	if (uio->uio_resid < 0 || uio->uio_resid > TUNMTU) {
		TUNDEBUG("%s: len=%d!\n", ifp->if_xname, uio->uio_resid);
		return (EIO);
	}

	switch (dst.sa_family) {
#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
	default:
		return (EAFNOSUPPORT);
	}

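	/*
	 * Copy the user's buffer into an mbuf chain, one packet per
	 * write, then hand it to the protocol input queue chosen above.
	 */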
	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;

	top = 0;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod (m, caddr_t), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET (m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem (top);
		ifp->if_ierrors++;
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;
	if_addref(ifp);

#if NBPFILTER > 0
	if (tp->tun_bpf) {
		/*
		 * We need to prepend the address family as
		 * a four byte field. Cons up a dummy header
		 * to pacify bpf. This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		struct mbuf m;
		u_int32_t af = AF_INET;

		m.m_next = top;
		m.m_len = sizeof(af);
		m.m_data = (char *)&af;

		bpf_mtap(tp->tun_bpf, &m);
	}
#endif

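	/*
	 * Place the packet on the protocol input queue and schedule the
	 * network soft interrupt; drop it if the queue is full.
	 */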
	s = splimp();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		splx(s);
		ifp->if_collisions++;
		m_freem(top);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, top);
	splx(s);
	ifp->if_ipackets++;
	schednetisr(isr);
	return (error);
}

/*
 * tunpoll - the poll interface; this is really only useful for reads.
 * The write side always selects true: a write never blocks anyway,
 * it either accepts the packet or drops it.
 */
int
tunpoll(dev, events, p)
	dev_t	dev;
	int	events;
	struct proc *p;
{
	int		unit = minor(dev), s;
	struct tun_softc *tp = &tunctl[unit];
#ifdef _HAS_IF_ALLOC
	struct ifnet	*ifp = tp->tun_if;
#else
	struct ifnet	*ifp = &tp->tun_if;
#endif
	int		revents = 0;

	s = splimp();
	TUNDEBUG("%s: tunpoll\n", ifp->if_xname);

	if (events & (POLLIN | POLLRDNORM)) {
		if (ifp->if_snd.ifq_len > 0) {
			TUNDEBUG("%s: tunpoll q=%d\n", ifp->if_xname,
			    ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG("%s: tunpoll waiting\n", ifp->if_xname);
			selrecord(p, &tp->tun_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	splx(s);
	return (revents);
}

#endif  /* NTUN */