/*	$NetBSD: if_tap.c,v 1.38.6.1 2008/04/03 12:43:07 mjf Exp $	*/

/*
 * Copyright (c) 2003, 2004, 2008 The NetBSD Foundation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * tap(4) is a virtual Ethernet interface.  It appears as a real Ethernet
 * device to the system, but can also be accessed by userland through a
 * character device interface, which allows reading and injecting frames.
 */
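
/*
 * Typical usage, for illustration only:
 *
 *	# ifconfig tap0 create
 *	# ifconfig tap0 up
 *
 * A userland process can then open /dev/tap0 and use read(2) to fetch
 * Ethernet frames queued on the interface for transmission, and write(2)
 * to inject frames into the network stack.
 */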

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_tap.c,v 1.38.6.1 2008/04/03 12:43:07 mjf Exp $");

#if defined(_KERNEL_OPT)
#include "bpfilter.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/ksyms.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/simplelock.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/if_tap.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <compat/sys/sockio.h>

/*
 * sysctl node management
 *
 * It's not really possible to use a SYSCTL_SETUP block with
 * the current LKM implementation, so it is easier to just define
 * our own function.
 *
 * The handler function is a "helper" in Andrew Brown's sysctl
 * framework terminology.  It is used as a gateway for sysctl
 * requests over the nodes.
 *
 * tap_log allows the module to log creations of nodes and
 * destroy them all at once using sysctl_teardown.
 */
static int	tap_node;
static int	tap_sysctl_handler(SYSCTLFN_PROTO);
SYSCTL_SETUP_PROTO(sysctl_tap_setup);

/*
 * Since we're an Ethernet device, we need the following three
 * components: a leading struct device, a struct ethercom,
 * and also a struct ifmedia since we don't attach a PHY to
 * ourselves.  We could emulate one, but there's no real
 * point.
 */

struct tap_softc {
	device_t	sc_dev;
	struct ifmedia	sc_im;
	struct ethercom	sc_ec;
	int		sc_flags;
#define	TAP_INUSE	0x00000001	/* tap device can only be opened once */
#define	TAP_ASYNCIO	0x00000002	/* user is using async I/O (SIGIO) on the device */
#define	TAP_NBIO	0x00000004	/* user wants calls to avoid blocking */
#define	TAP_GOING	0x00000008	/* interface is being destroyed */
	struct selinfo	sc_rsel;
	pid_t		sc_pgid;	/* For async. IO */
	kmutex_t	sc_rdlock;
	struct simplelock	sc_kqlock;
};

/* autoconf(9) glue */

void	tapattach(int);

static int	tap_match(device_t, cfdata_t, void *);
static void	tap_attach(device_t, device_t, void *);
static int	tap_detach(device_t, int);

CFATTACH_DECL_NEW(tap, sizeof(struct tap_softc),
    tap_match, tap_attach, tap_detach, NULL);
extern struct cfdriver tap_cd;

/* Real device access routines */
static int	tap_dev_close(struct tap_softc *);
static int	tap_dev_read(int, struct uio *, int);
static int	tap_dev_write(int, struct uio *, int);
static int	tap_dev_ioctl(int, u_long, void *, struct lwp *);
static int	tap_dev_poll(int, int, struct lwp *);
static int	tap_dev_kqfilter(int, struct knote *);

/* Fileops access routines */
static int	tap_fops_close(file_t *);
static int	tap_fops_read(file_t *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	tap_fops_write(file_t *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	tap_fops_ioctl(file_t *, u_long, void *);
static int	tap_fops_poll(file_t *, int);
static int	tap_fops_kqfilter(file_t *, struct knote *);

static const struct fileops tap_fileops = {
	tap_fops_read,
	tap_fops_write,
	tap_fops_ioctl,
	fnullop_fcntl,
	tap_fops_poll,
	fbadop_stat,
	tap_fops_close,
	tap_fops_kqfilter,
};

/* Helper for cloning open() */
static int	tap_dev_cloner(struct lwp *);

/* Character device routines */
static int	tap_cdev_open(dev_t, int, int, struct lwp *);
static int	tap_cdev_close(dev_t, int, int, struct lwp *);
static int	tap_cdev_read(dev_t, struct uio *, int);
static int	tap_cdev_write(dev_t, struct uio *, int);
static int	tap_cdev_ioctl(dev_t, u_long, void *, int, struct lwp *);
static int	tap_cdev_poll(dev_t, int, struct lwp *);
static int	tap_cdev_kqfilter(dev_t, struct knote *);

const struct cdevsw tap_cdevsw = {
	tap_cdev_open, tap_cdev_close,
	tap_cdev_read, tap_cdev_write,
	tap_cdev_ioctl, nostop, notty,
	tap_cdev_poll, nommap,
	tap_cdev_kqfilter,
	D_OTHER,
};

#define	TAP_CLONER	0xfffff		/* Maximal minor value */

/* kqueue-related routines */
static void	tap_kqdetach(struct knote *);
static int	tap_kqread(struct knote *, long);

/*
 * Those are needed by the if_media interface.
 */

static int	tap_mediachange(struct ifnet *);
static void	tap_mediastatus(struct ifnet *, struct ifmediareq *);

/*
 * Those are needed by the ifnet interface, and would typically be
 * there for any network interface driver.
 * Some other routines are optional: watchdog and drain.
 */

static void	tap_start(struct ifnet *);
static void	tap_stop(struct ifnet *, int);
static int	tap_init(struct ifnet *);
static int	tap_ioctl(struct ifnet *, u_long, void *);

/* This is an internal function to keep tap_ioctl readable */
static int	tap_lifaddr(struct ifnet *, u_long, struct ifaliasreq *);

/*
 * tap is a clonable interface, although it is highly unrealistic for
 * an Ethernet device.
 *
 * Here are the bits needed for a clonable interface.
 */
static int	tap_clone_create(struct if_clone *, int);
static int	tap_clone_destroy(struct ifnet *);

struct if_clone tap_cloners = IF_CLONE_INITIALIZER("tap",
					tap_clone_create,
					tap_clone_destroy);

/* Helper functions shared by the two cloning code paths */
static struct tap_softc *	tap_clone_creator(int);
int	tap_clone_destroyer(device_t);

void
tapattach(int n)
{
	int error;

	error = config_cfattach_attach(tap_cd.cd_name, &tap_ca);
	if (error) {
		aprint_error("%s: unable to register cfattach\n",
		    tap_cd.cd_name);
		(void)config_cfdriver_detach(&tap_cd);
		return;
	}

	if_clone_attach(&tap_cloners);
}

/* Pretty much useless for a pseudo-device */
static int
tap_match(device_t parent, cfdata_t cfdata, void *arg)
{

	return (1);
}

void
tap_attach(device_t parent, device_t self, void *aux)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp;
	const struct sysctlnode *node;
	uint8_t enaddr[ETHER_ADDR_LEN] =
	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
	char enaddrstr[3 * ETHER_ADDR_LEN];
	struct timeval tv;
	uint32_t ui;
	int error;

	sc->sc_dev = self;

	/*
	 * In order to obtain a unique initial Ethernet address on a host,
	 * do some randomisation using the current uptime.  It's not meant
	 * for anything but avoiding hard-coding an address.
	 */
	getmicrouptime(&tv);
	ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
	memcpy(enaddr+3, (uint8_t *)&ui, 3);

	aprint_verbose_dev(self, "Ethernet address %s\n",
	    ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));

	/*
	 * Why 1000baseT? Why not? You can add more.
	 *
	 * Note that there are 3 steps: init, one or several additions to
	 * list of supported media, and in the end, the selection of one
	 * of them.
	 */
	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);

	/*
	 * One should note that an interface must do multicast in order
	 * to support IPv6.
	 */
	ifp = &sc->sc_ec.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = tap_ioctl;
	ifp->if_start = tap_start;
	ifp->if_stop = tap_stop;
	ifp->if_init = tap_init;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Those steps are mandatory for an Ethernet driver, the first call
	 * being common to all network interface drivers. */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	sc->sc_flags = 0;

	/*
	 * Add a sysctl node for that interface.
	 *
	 * The pointer transmitted is not a string, but instead a pointer to
	 * the softc structure, which we can use to build the string value on
	 * the fly in the helper function of the node.  See the comments for
	 * tap_sysctl_handler for details.
	 *
	 * Usually sysctl_createv is called with CTL_CREATE as the before-last
	 * component.  However, we can allocate a number ourselves, as we are
	 * the only consumer of the net.link.<iface> node.  In this case, the
	 * unit number is conveniently used to number the node.  CTL_CREATE
	 * would just work, too.
	 */
	if ((error = sysctl_createv(NULL, 0, NULL,
	    &node, CTLFLAG_READWRITE,
	    CTLTYPE_STRING, device_xname(self), NULL,
	    tap_sysctl_handler, 0, sc, 18,
	    CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev),
	    CTL_EOL)) != 0)
		aprint_error_dev(self, "sysctl_createv returned %d, ignoring\n",
		    error);

	/*
	 * Initialize the two locks for the device.
	 *
	 * We need a lock here because even though the tap device can be
	 * opened only once, the file descriptor might be passed to another
	 * process, say a fork(2)ed child.
	 *
	 * The Giant saves us from most of the hassle, but since the read
	 * operation can sleep, we don't want two processes to wake up at
	 * the same moment and both try and dequeue a single packet.
	 *
	 * The queue for event listeners (used by kqueue(9), see below) has
	 * to be protected, too, but we don't need the same level of
	 * complexity for that lock, so a simple spinning lock is fine.
	 */
	mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE);
	simple_lock_init(&sc->sc_kqlock);
}

/*
 * When detaching, we do the inverse of what is done in the attach
 * routine, in reversed order.
 */
static int
tap_detach(device_t self, int flags)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int error, s;

	sc->sc_flags |= TAP_GOING;
	s = splnet();
	tap_stop(ifp, 1);
	if_down(ifp);
	splx(s);

	/*
	 * Destroying a single leaf is a very straightforward operation using
	 * sysctl_destroyv.  One should be sure to always end the path with
	 * CTL_EOL.
	 */
	if ((error = sysctl_destroyv(NULL, CTL_NET, AF_LINK, tap_node,
	    device_unit(sc->sc_dev), CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_destroyv returned %d, ignoring\n", error);
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_delete_instance(&sc->sc_im, IFM_INST_ANY);
	mutex_destroy(&sc->sc_rdlock);

	return (0);
}

/*
 * This function is called by the ifmedia layer to notify the driver
 * that the user requested a media change.  A real driver would
 * reconfigure the hardware.
 */
static int
tap_mediachange(struct ifnet *ifp)
{
	return (0);
}

/*
 * Here the user asks for the currently used media.
 */
static void
tap_mediastatus(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	imr->ifm_active = sc->sc_im.ifm_cur->ifm_media;
}

/*
 * This is the function where we SEND packets.
 *
 * There is no 'receive' equivalent.  A typical driver will get
 * interrupts from the hardware, and from there will inject new packets
 * into the network stack.
 *
 * Once handled, a packet must be freed.  A real driver might not be able
 * to fit all the pending packets into the hardware, and is allowed to
 * return before having sent all the packets.  It should then use the
 * if_flags flag IFF_OACTIVE to notify the upper layer.
 *
 * There are also other flags one should check, such as IFF_PAUSE.
 *
 * It is our duty to make packets available to BPF listeners.
 *
 * You should be aware that this function is called by the Ethernet layer
 * at splnet().
 *
 * When the device is opened, we have to pass the packet(s) to the
 * userland.  For that we stay in OACTIVE mode while the userland gets
 * the packets, and we send a signal to the processes waiting to read.
 *
 * wakeup(sc) is the counterpart to the tsleep call in
 * tap_dev_read, while selnotify() is used for kevent(2) and
 * poll(2) (which includes select(2)) listeners.
 */
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				return;

			ifp->if_opackets++;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m0);
#endif

			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags |= IFF_OACTIVE;
		wakeup(sc);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			fownsignal(sc->sc_pgid, SIGIO, POLL_IN,
			    POLLIN|POLLRDNORM, NULL);
	}
}

/*
 * A typical driver will only contain the following handlers for
 * ioctl calls, except SIOCSIFPHYADDR.
 * The latter is a hack I used to set the Ethernet address of the
 * faked device.
 *
 * Note that both ifmedia_ioctl() and ether_ioctl() have to be
 * called under splnet().
 */
static int
tap_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
#ifdef OSIOCSIFMEDIA
	case OSIOCSIFMEDIA:
#endif
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_im, cmd);
		break;
	case SIOCSIFPHYADDR:
		error = tap_lifaddr(ifp, cmd, (struct ifaliasreq *)data);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET)
			error = 0;
		break;
	}

	splx(s);

	return (error);
}

/*
 * Helper function to set Ethernet address.  This shouldn't be done there,
 * and should actually be available to all Ethernet drivers, real or not.
 */
static int
tap_lifaddr(struct ifnet *ifp, u_long cmd, struct ifaliasreq *ifra)
{
	const struct sockaddr_dl *sdl = satosdl(&ifra->ifra_addr);

	if (sdl->sdl_family != AF_LINK)
		return (EINVAL);

	if_set_sadl(ifp, CLLADDR(sdl), ETHER_ADDR_LEN);

	return (0);
}

/*
 * _init() would typically be called when an interface goes up,
 * meaning it should configure itself into the state in which it
 * can send packets.
 */
static int
tap_init(struct ifnet *ifp)
{
	ifp->if_flags |= IFF_RUNNING;

	tap_start(ifp);

	return (0);
}

/*
 * _stop() is called when an interface goes down.  It is our
 * responsibility to validate that state by clearing the
 * IFF_RUNNING flag.
 *
 * We have to wake up all the sleeping processes to have the pending
 * read requests cancelled.
 */
static void
tap_stop(struct ifnet *ifp, int disable)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	ifp->if_flags &= ~IFF_RUNNING;
	wakeup(sc);
	selnotify(&sc->sc_rsel, 0, 1);
	if (sc->sc_flags & TAP_ASYNCIO)
		fownsignal(sc->sc_pgid, SIGIO, POLL_HUP, 0, NULL);
}

/*
 * The 'create' command of ifconfig can be used to create
 * any numbered instance of a given device.  Thus we have to
 * make sure we have enough room in cd_devs to create the
 * user-specified instance.  config_attach_pseudo will do this
 * for us.
 */
static int
tap_clone_create(struct if_clone *ifc, int unit)
{
	if (tap_clone_creator(unit) == NULL) {
		aprint_error("%s%d: unable to attach an instance\n",
		    tap_cd.cd_name, unit);
		return (ENXIO);
	}

	return (0);
}

/*
 * tap(4) can be cloned in two ways:
 *   using 'ifconfig tap0 create', which will use the network
 *     interface cloning API, and call tap_clone_create above.
 *   opening the cloning device node, whose minor number is TAP_CLONER.
 *     See below for an explanation of how this part works.
 */
static struct tap_softc *
tap_clone_creator(int unit)
{
	struct cfdata *cf;

	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
	cf->cf_name = tap_cd.cd_name;
	cf->cf_atname = tap_ca.ca_name;
	if (unit == -1) {
		/* let autoconf find the first free one */
		cf->cf_unit = 0;
		cf->cf_fstate = FSTATE_STAR;
	} else {
		cf->cf_unit = unit;
		cf->cf_fstate = FSTATE_NOTFOUND;
	}

	return device_private(config_attach_pseudo(cf));
}

/*
 * The clean design of if_clone and autoconf(9) makes that part
 * really straightforward.  The second argument of config_detach
 * means neither QUIET nor FORCED.
 */
static int
tap_clone_destroy(struct ifnet *ifp)
{
	struct tap_softc *sc = ifp->if_softc;

	return tap_clone_destroyer(sc->sc_dev);
}

int
tap_clone_destroyer(device_t dev)
{
	cfdata_t cf = device_cfdata(dev);
	int error;

	if ((error = config_detach(dev, 0)) != 0)
		aprint_error_dev(dev, "unable to detach instance\n");
	free(cf, M_DEVBUF);

	return (error);
}

/*
 * tap(4) is a bit of a hybrid device.  It can be used in two different
 * ways:
 *  1. ifconfig tapN create, then use /dev/tapN to read/write off it.
 *  2. open /dev/tap, get a new interface created and read/write off it.
 *     That interface is destroyed when the process that had it created exits.
 *
 * The first way is managed by the cdevsw structure, and you access interfaces
 * through a (major, minor) mapping:  tap4 is obtained by the minor number
 * 4.  The entry points for the cdevsw interface are prefixed by tap_cdev_.
 *
 * The second way is the so-called "cloning" device.  It's a special minor
 * number (chosen as the maximal number, to allow as many tap devices as
 * possible).  The user first opens the cloner (e.g., /dev/tap), and that
 * call ends in tap_cdev_open.  The actual place where it is handled is
 * tap_dev_cloner.
 *
 * A tap device cannot be opened more than once at a time, so the cdevsw
 * part of open() does nothing but noting that the interface is being used and
 * hence ready to actually handle packets.
 */

static int
tap_cdev_open(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct tap_softc *sc;

	if (minor(dev) == TAP_CLONER)
		return tap_dev_cloner(l);

	sc = device_private(device_lookup(&tap_cd, minor(dev)));
	if (sc == NULL)
		return (ENXIO);

	/* The device can only be opened once */
	if (sc->sc_flags & TAP_INUSE)
		return (EBUSY);
	sc->sc_flags |= TAP_INUSE;
	return (0);
}

/*
 * There are several kinds of cloning devices, and the simplest is the one
 * tap(4) uses.  What it does is replace the file descriptor with a new one,
 * with its own fileops structure (which maps to the various read, write,
 * ioctl functions).  It starts by allocating a new file descriptor with
 * fd_allocfile, then actually creates the new tap device.
 *
 * Once those two steps are successful, we can re-wire the existing file
 * descriptor to its new self.  This is done with fd_clone():  it fills the fp
 * structure as needed (notably f_data gets filled with the fifth parameter
 * passed, the unit of the tap device which will allow us to identify the
 * device later), and returns EMOVEFD.
 *
 * That magic value is interpreted by sys_open() which then replaces the
 * current file descriptor by the new one (through a magic member of struct
 * lwp, l_dupfd).
 *
 * The tap device is flagged as being busy since it otherwise could be
 * externally accessed through the corresponding device node with the cdevsw
 * interface.
 */
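
/*
 * For illustration, a cloning open from userland looks roughly like this
 * (sketch only, error handling omitted; TAPGIFNAME is the ioctl handled
 * in tap_dev_ioctl below):
 *
 *	int fd = open("/dev/tap", O_RDWR);
 *	struct ifreq ifr;
 *
 *	if (ioctl(fd, TAPGIFNAME, &ifr) == 0)
 *		printf("attached to %s\n", ifr.ifr_name);
 */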

static int
tap_dev_cloner(struct lwp *l)
{
	struct tap_softc *sc;
	file_t *fp;
	int error, fd;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	if ((sc = tap_clone_creator(-1)) == NULL) {
		fd_abort(curproc, fp, fd);
		return (ENXIO);
	}

	sc->sc_flags |= TAP_INUSE;

	return fd_clone(fp, fd, FREAD|FWRITE, &tap_fileops,
	    (void *)(intptr_t)device_unit(sc->sc_dev));
}

/*
 * While all other operations (read, write, ioctl, poll and kqfilter) are
 * really the same whether we are in cdevsw or fileops mode, the close()
 * function is slightly different in the two cases.
 *
 * As for the others, the core of it is shared in tap_dev_close.  What
 * it does is sufficient for the cdevsw interface, but the cloning interface
 * needs another thing:  the interface is destroyed when the process that
 * created it closes it.
 */
static int
tap_cdev_close(dev_t dev, int flags, int fmt,
    struct lwp *l)
{
	struct tap_softc *sc =
	    device_private(device_lookup(&tap_cd, minor(dev)));

	if (sc == NULL)
		return (ENXIO);

	return tap_dev_close(sc);
}

/*
 * It might happen that the administrator used ifconfig to externally destroy
 * the interface.  In that case, tap_fops_close will be called while
 * tap_detach is already happening.  If we called it again from here, we
 * would deadlock.  TAP_GOING ensures that this situation doesn't happen.
 */
static int
tap_fops_close(file_t *fp)
{
	int unit = (intptr_t)fp->f_data;
	struct tap_softc *sc;
	int error;

	sc = device_private(device_lookup(&tap_cd, unit));
	if (sc == NULL)
		return (ENXIO);

	/* tap_dev_close currently always succeeds, but it might not
	 * always be the case. */
	if ((error = tap_dev_close(sc)) != 0)
		return (error);

	/* Destroy the device now that it is no longer useful,
	 * unless it's already being destroyed. */
	if ((sc->sc_flags & TAP_GOING) != 0)
		return (0);

	return tap_clone_destroyer(sc->sc_dev);
}

static int
tap_dev_close(struct tap_softc *sc)
{
	struct ifnet *ifp;
	int s;

	s = splnet();
	/* Let tap_start handle packets again */
	ifp = &sc->sc_ec.ec_if;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Purge output queue */
	if (!(IFQ_IS_EMPTY(&ifp->if_snd))) {
		struct mbuf *m;

		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			ifp->if_opackets++;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			m_freem(m);
		}
	}
	splx(s);

	sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO);

	return (0);
}

static int
tap_cdev_read(dev_t dev, struct uio *uio, int flags)
{
	return tap_dev_read(minor(dev), uio, flags);
}

static int
tap_fops_read(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	return tap_dev_read((intptr_t)fp->f_data, uio, flags);
}

static int
tap_dev_read(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_private(device_lookup(&tap_cd, unit));
	struct ifnet *ifp;
	struct mbuf *m, *n;
	int error = 0, s;

	if (sc == NULL)
		return (ENXIO);

	ifp = &sc->sc_ec.ec_if;
	if ((ifp->if_flags & IFF_UP) == 0)
		return (EHOSTDOWN);

	/*
	 * In the TAP_NBIO case, we have to make sure we won't be sleeping
	 */
	if ((sc->sc_flags & TAP_NBIO) != 0) {
		if (!mutex_tryenter(&sc->sc_rdlock))
			return (EWOULDBLOCK);
	} else {
		mutex_enter(&sc->sc_rdlock);
	}

	s = splnet();
	if (IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		splx(s);
		/*
		 * We must release the lock before sleeping, and re-acquire it
		 * after.
		 */
		mutex_exit(&sc->sc_rdlock);
		if (sc->sc_flags & TAP_NBIO)
			error = EWOULDBLOCK;
		else
			error = tsleep(sc, PSOCK|PCATCH, "tap", 0);
		if (error != 0)
			return (error);
		/* The device might have been downed */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (EHOSTDOWN);
		if ((sc->sc_flags & TAP_NBIO)) {
			if (!mutex_tryenter(&sc->sc_rdlock))
				return (EWOULDBLOCK);
		} else {
			mutex_enter(&sc->sc_rdlock);
		}
		s = splnet();
	}

	IFQ_DEQUEUE(&ifp->if_snd, m);
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
	if (m == NULL) {
		error = 0;
		goto out;
	}

	ifp->if_opackets++;
#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/*
	 * One read is one packet.
	 */
	do {
		error = uiomove(mtod(m, void *),
		    min(m->m_len, uio->uio_resid), uio);
		MFREE(m, n);
		m = n;
	} while (m != NULL && uio->uio_resid > 0 && error == 0);

	if (m != NULL)
		m_freem(m);

out:
	mutex_exit(&sc->sc_rdlock);
	return (error);
}

static int
tap_cdev_write(dev_t dev, struct uio *uio, int flags)
{
	return tap_dev_write(minor(dev), uio, flags);
}

static int
tap_fops_write(file_t *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	return tap_dev_write((intptr_t)fp->f_data, uio, flags);
}

static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_private(device_lookup(&tap_cd, unit));
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	s = splnet();
	(*ifp->if_input)(ifp, m);
	splx(s);

	return (0);
}

static int
tap_cdev_ioctl(dev_t dev, u_long cmd, void *data, int flags,
    struct lwp *l)
{
	return tap_dev_ioctl(minor(dev), cmd, data, l);
}

static int
tap_fops_ioctl(file_t *fp, u_long cmd, void *data)
{
	return tap_dev_ioctl((intptr_t)fp->f_data, cmd, data, curlwp);
}

static int
tap_dev_ioctl(int unit, u_long cmd, void *data, struct lwp *l)
{
	struct tap_softc *sc =
	    device_private(device_lookup(&tap_cd, unit));
	int error = 0;

	if (sc == NULL)
		return (ENXIO);

	switch (cmd) {
	case FIONREAD:
		{
			struct ifnet *ifp = &sc->sc_ec.ec_if;
			struct mbuf *m;
			int s;

			s = splnet();
			IFQ_POLL(&ifp->if_snd, m);

			if (m == NULL)
				*(int *)data = 0;
			else
				*(int *)data = m->m_pkthdr.len;
			splx(s);
		} break;
	case TIOCSPGRP:
	case FIOSETOWN:
		error = fsetown(&sc->sc_pgid, cmd, data);
		break;
	case TIOCGPGRP:
	case FIOGETOWN:
		error = fgetown(sc->sc_pgid, cmd, data);
		break;
	case FIOASYNC:
		if (*(int *)data)
			sc->sc_flags |= TAP_ASYNCIO;
		else
			sc->sc_flags &= ~TAP_ASYNCIO;
		break;
	case FIONBIO:
		if (*(int *)data)
			sc->sc_flags |= TAP_NBIO;
		else
			sc->sc_flags &= ~TAP_NBIO;
		break;
#ifdef OTAPGIFNAME
	case OTAPGIFNAME:
#endif
	case TAPGIFNAME:
		{
			struct ifreq *ifr = (struct ifreq *)data;
			struct ifnet *ifp = &sc->sc_ec.ec_if;

			strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
		} break;
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
tap_cdev_poll(dev_t dev, int events, struct lwp *l)
{
	return tap_dev_poll(minor(dev), events, l);
}

static int
tap_fops_poll(file_t *fp, int events)
{
	return tap_dev_poll((intptr_t)fp->f_data, events, curlwp);
}

static int
tap_dev_poll(int unit, int events, struct lwp *l)
{
	struct tap_softc *sc =
	    device_private(device_lookup(&tap_cd, unit));
	int revents = 0;

	if (sc == NULL)
		return POLLERR;

	if (events & (POLLIN|POLLRDNORM)) {
		struct ifnet *ifp = &sc->sc_ec.ec_if;
		struct mbuf *m;
		int s;

		s = splnet();
		IFQ_POLL(&ifp->if_snd, m);
		splx(s);

		if (m != NULL)
			revents |= events & (POLLIN|POLLRDNORM);
		else {
			simple_lock(&sc->sc_kqlock);
			selrecord(l, &sc->sc_rsel);
			simple_unlock(&sc->sc_kqlock);
		}
	}
	revents |= events & (POLLOUT|POLLWRNORM);

	return (revents);
}
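
/*
 * For illustration, a userland reader would typically wait for a frame
 * with poll(2) (sketch only, error handling omitted, fd/buf assumed):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 */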

static struct filterops tap_read_filterops = { 1, NULL, tap_kqdetach,
	tap_kqread };
static struct filterops tap_seltrue_filterops = { 1, NULL, tap_kqdetach,
	filt_seltrue };

static int
tap_cdev_kqfilter(dev_t dev, struct knote *kn)
{
	return tap_dev_kqfilter(minor(dev), kn);
}

static int
tap_fops_kqfilter(file_t *fp, struct knote *kn)
{
	return tap_dev_kqfilter((intptr_t)fp->f_data, kn);
}

static int
tap_dev_kqfilter(int unit, struct knote *kn)
{
	struct tap_softc *sc =
	    device_private(device_lookup(&tap_cd, unit));

	if (sc == NULL)
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tap_read_filterops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tap_seltrue_filterops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = sc;
	simple_lock(&sc->sc_kqlock);
	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
	simple_unlock(&sc->sc_kqlock);
	return (0);
}

static void
tap_kqdetach(struct knote *kn)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;

	simple_lock(&sc->sc_kqlock);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	simple_unlock(&sc->sc_kqlock);
}

static int
tap_kqread(struct knote *kn, long hint)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int s;

	s = splnet();
	IFQ_POLL(&ifp->if_snd, m);

	if (m == NULL)
		kn->kn_data = 0;
	else
		kn->kn_data = m->m_pkthdr.len;
	splx(s);
	return (kn->kn_data != 0 ? 1 : 0);
}

/*
 * sysctl management routines
 * You can set the address of an interface through:
 * net.link.tap.tap<number>
 *
 * Note the consistent use of tap_log in order to use
 * sysctl_teardown at unload time.
 *
 * In the kernel you will find a lot of SYSCTL_SETUP blocks.  Those
 * blocks register a function in a special section of the kernel
 * (called a link set) which is used at init_sysctl() time to cycle
 * through all those functions to create the kernel's sysctl tree.
 *
 * It is not (currently) possible to use link sets in an LKM, so the
 * easiest is to simply call our own setup routine at load time.
 *
 * In the SYSCTL_SETUP blocks you find in the kernel, nodes have the
 * CTLFLAG_PERMANENT flag, meaning they cannot be removed.  Once the
 * whole kernel sysctl tree is built, it is not possible to add any
 * permanent node.
 *
 * It should be noted that we're not saving the sysctlnode pointer
 * we are returned when creating the "tap" node.  That structure
 * cannot be trusted once out of the calling function, as it might
 * get reused.  So we just save the MIB number, and always give the
 * full path starting from the root for later calls to sysctl_createv
 * and sysctl_destroyv.
 */
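
/*
 * For illustration, reading and setting the address from userland looks
 * like this (the actual address bytes will differ):
 *
 *	$ sysctl net.link.tap.tap0
 *	net.link.tap.tap0 = f2:0b:a4:00:01:02
 *	# sysctl -w net.link.tap.tap0=f2:0b:a4:aa:bb:cc
 */
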
SYSCTL_SETUP(sysctl_tap_setup, "sysctl net.link.tap subtree setup")
{
	const struct sysctlnode *node;
	int error = 0;

	if ((error = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "net", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, CTL_EOL)) != 0)
		return;

	if ((error = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "link", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_EOL)) != 0)
		return;

	/*
	 * The first four parameters of sysctl_createv are for management.
	 *
	 * The four that follow, here starting with the CTLFLAG_PERMANENT
	 * flag, describe the node.
	 *
	 * The next series of four set its value, through various possible
	 * means.
	 *
	 * Last but not least, the path to the node is described.  That path
	 * is relative to the given root (third argument).  Here we're
	 * starting from the root.
	 */
	if ((error = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "tap", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_CREATE, CTL_EOL)) != 0)
		return;
	tap_node = node->sysctl_num;
}

/*
 * The helper functions make Andrew Brown's interface really
 * shine.  They make it possible to create values on the fly, whether
 * the sysctl value is read or written.
 *
 * As shown as an example in the man page, the first step is to
 * create a copy of the node to have sysctl_lookup work on it.
 *
 * Here, we have more work to do than just a copy, since we have
 * to create the string.  The first step is to collect the actual
 * value of the node, which is a convenient pointer to the softc
 * of the interface.  From there we create the string and use it
 * as the value, but only for the *copy* of the node.
 *
 * Then we let sysctl_lookup do the magic, which consists in
 * setting oldp and newp as required by the operation.  When the
 * value is read, that means that the string will be copied to
 * the user, and when it is written, the new value will be copied
 * over in the addr array.
 *
 * If newp is NULL, the user was reading the value, so we don't
 * have anything else to do.  If a new value was written, we
 * have to check it.
 *
 * If it is incorrect, we can return an error and leave 'node' as
 * it is:  since it is a copy of the actual node, the change will
 * be forgotten.
 *
 * Upon a correct input, we commit the change to the ifnet
 * structure of our interface.
 */
static int
tap_sysctl_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct tap_softc *sc;
	struct ifnet *ifp;
	int error;
	size_t len;
	char addr[3 * ETHER_ADDR_LEN];
	uint8_t enaddr[ETHER_ADDR_LEN];

	node = *rnode;
	sc = node.sysctl_data;
	ifp = &sc->sc_ec.ec_if;
	(void)ether_snprintf(addr, sizeof(addr), CLLADDR(ifp->if_sadl));
	node.sysctl_data = addr;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	len = strlen(addr);
	if (len < 11 || len > 17)
		return (EINVAL);

	/* Commit change */
	if (ether_nonstatic_aton(enaddr, addr) != 0)
		return (EINVAL);
	if_set_sadl(ifp, enaddr, ETHER_ADDR_LEN);
	return (error);
}