1 /* $NetBSD: if_tap.c,v 1.85 2016/08/07 17:38:34 christos Exp $ */
2
3 /*
4 * Copyright (c) 2003, 2004, 2008, 2009 The NetBSD Foundation.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * tap(4) is a virtual Ethernet interface. It appears as a real Ethernet
31 * device to the system, but can also be accessed by userland through a
32 * character device interface, which allows reading and injecting frames.
33 */
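/*
 * For illustration, a minimal userland sketch of that character-device
 * access (error handling omitted; device name and buffer size are
 * assumptions, not part of this driver):
 *
 *	int fd = open("/dev/tap0", O_RDWR);	// after "ifconfig tap0 create"
 *	char buf[ETHER_MAX_LEN];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// one read returns one frame
 *	if (n > 0)
 *		(void)write(fd, buf, n);	// one write injects one frame
 */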
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: if_tap.c,v 1.85 2016/08/07 17:38:34 christos Exp $");
37
38 #if defined(_KERNEL_OPT)
39
40 #include "opt_modular.h"
41 #include "opt_compat_netbsd.h"
42 #endif
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 #include <sys/conf.h>
49 #include <sys/cprng.h>
50 #include <sys/device.h>
51 #include <sys/file.h>
52 #include <sys/filedesc.h>
53 #include <sys/poll.h>
54 #include <sys/proc.h>
55 #include <sys/select.h>
56 #include <sys/sockio.h>
57 #if defined(COMPAT_40) || defined(MODULAR)
58 #include <sys/sysctl.h>
59 #endif
60 #include <sys/kauth.h>
61 #include <sys/mutex.h>
62 #include <sys/intr.h>
63 #include <sys/stat.h>
64 #include <sys/device.h>
65 #include <sys/module.h>
66 #include <sys/atomic.h>
67
68 #include <net/if.h>
69 #include <net/if_dl.h>
70 #include <net/if_ether.h>
71 #include <net/if_media.h>
72 #include <net/if_tap.h>
73 #include <net/bpf.h>
74
75 #include <compat/sys/sockio.h>
76
77 #include "ioconf.h"
78
79 #if defined(COMPAT_40) || defined(MODULAR)
80 /*
81 * sysctl node management
82 *
83 * It's not really possible to use a SYSCTL_SETUP block with
84 * the current module implementation, so it is easier to just define
85 * our own function.
86 *
87 * The handler function is a "helper" in Andrew Brown's sysctl
88 * framework terminology. It is used as a gateway for sysctl
89 * requests over the nodes.
90 *
91 * tap_log allows the module to log creations of nodes and
92 * destroy them all at once using sysctl_teardown.
93 */
94 static int tap_node;
95 static int tap_sysctl_handler(SYSCTLFN_PROTO);
96 SYSCTL_SETUP_PROTO(sysctl_tap_setup);
97 #endif
98
99 /*
100 * Since we're an Ethernet device, we need the following two
101 * components: a struct ethercom and a struct ifmedia,
102 * because we don't attach a PHY to ourselves.
103 * We could emulate one, but there's no real point.
104 */
105
106 struct tap_softc {
107 device_t sc_dev;
108 struct ifmedia sc_im;
109 struct ethercom sc_ec;
110 int sc_flags;
111 #define TAP_INUSE 0x00000001 /* tap device can only be opened once */
112 #define TAP_ASYNCIO 0x00000002 /* user is using async I/O (SIGIO) on the device */
113 #define TAP_NBIO 0x00000004 /* user wants calls to avoid blocking */
114 #define TAP_GOING 0x00000008 /* interface is being destroyed */
115 struct selinfo sc_rsel;
116 pid_t sc_pgid; /* For async. IO */
117 kmutex_t sc_rdlock;
118 kmutex_t sc_kqlock;
119 void *sc_sih;
120 struct timespec sc_atime;
121 struct timespec sc_mtime;
122 struct timespec sc_btime;
123 };
124
125 /* autoconf(9) glue */
126
127 static int tap_match(device_t, cfdata_t, void *);
128 static void tap_attach(device_t, device_t, void *);
129 static int tap_detach(device_t, int);
130
131 CFATTACH_DECL_NEW(tap, sizeof(struct tap_softc),
132 tap_match, tap_attach, tap_detach, NULL);
133 extern struct cfdriver tap_cd;
134
135 /* Real device access routines */
136 static int tap_dev_close(struct tap_softc *);
137 static int tap_dev_read(int, struct uio *, int);
138 static int tap_dev_write(int, struct uio *, int);
139 static int tap_dev_ioctl(int, u_long, void *, struct lwp *);
140 static int tap_dev_poll(int, int, struct lwp *);
141 static int tap_dev_kqfilter(int, struct knote *);
142
143 /* Fileops access routines */
144 static int tap_fops_close(file_t *);
145 static int tap_fops_read(file_t *, off_t *, struct uio *,
146 kauth_cred_t, int);
147 static int tap_fops_write(file_t *, off_t *, struct uio *,
148 kauth_cred_t, int);
149 static int tap_fops_ioctl(file_t *, u_long, void *);
150 static int tap_fops_poll(file_t *, int);
151 static int tap_fops_stat(file_t *, struct stat *);
152 static int tap_fops_kqfilter(file_t *, struct knote *);
153
154 static const struct fileops tap_fileops = {
155 .fo_read = tap_fops_read,
156 .fo_write = tap_fops_write,
157 .fo_ioctl = tap_fops_ioctl,
158 .fo_fcntl = fnullop_fcntl,
159 .fo_poll = tap_fops_poll,
160 .fo_stat = tap_fops_stat,
161 .fo_close = tap_fops_close,
162 .fo_kqfilter = tap_fops_kqfilter,
163 .fo_restart = fnullop_restart,
164 };
165
166 /* Helper for cloning open() */
167 static int tap_dev_cloner(struct lwp *);
168
169 /* Character device routines */
170 static int tap_cdev_open(dev_t, int, int, struct lwp *);
171 static int tap_cdev_close(dev_t, int, int, struct lwp *);
172 static int tap_cdev_read(dev_t, struct uio *, int);
173 static int tap_cdev_write(dev_t, struct uio *, int);
174 static int tap_cdev_ioctl(dev_t, u_long, void *, int, struct lwp *);
175 static int tap_cdev_poll(dev_t, int, struct lwp *);
176 static int tap_cdev_kqfilter(dev_t, struct knote *);
177
178 const struct cdevsw tap_cdevsw = {
179 .d_open = tap_cdev_open,
180 .d_close = tap_cdev_close,
181 .d_read = tap_cdev_read,
182 .d_write = tap_cdev_write,
183 .d_ioctl = tap_cdev_ioctl,
184 .d_stop = nostop,
185 .d_tty = notty,
186 .d_poll = tap_cdev_poll,
187 .d_mmap = nommap,
188 .d_kqfilter = tap_cdev_kqfilter,
189 .d_discard = nodiscard,
190 .d_flag = D_OTHER
191 };
192
193 #define TAP_CLONER 0xfffff /* Maximal minor value */
194
195 /* kqueue-related routines */
196 static void tap_kqdetach(struct knote *);
197 static int tap_kqread(struct knote *, long);
198
199 /*
200 * Those are needed by the if_media interface.
201 */
202
203 static int tap_mediachange(struct ifnet *);
204 static void tap_mediastatus(struct ifnet *, struct ifmediareq *);
205
206 /*
207 * Those are needed by the ifnet interface, and would typically be
208 * there for any network interface driver.
209 * Some other routines are optional: watchdog and drain.
210 */
211
212 static void tap_start(struct ifnet *);
213 static void tap_stop(struct ifnet *, int);
214 static int tap_init(struct ifnet *);
215 static int tap_ioctl(struct ifnet *, u_long, void *);
216
217 /* Internal functions */
218 #if defined(COMPAT_40) || defined(MODULAR)
219 static int tap_lifaddr(struct ifnet *, u_long, struct ifaliasreq *);
220 #endif
221 static void tap_softintr(void *);
222
223 /*
224 * tap is a clonable interface, although it is highly unrealistic for
225 * an Ethernet device.
226 *
227 * Here are the bits needed for a clonable interface.
228 */
229 static int tap_clone_create(struct if_clone *, int);
230 static int tap_clone_destroy(struct ifnet *);
231
232 struct if_clone tap_cloners = IF_CLONE_INITIALIZER("tap",
233 tap_clone_create,
234 tap_clone_destroy);
235
236 /* Helper functions shared by the two cloning code paths */
237 static struct tap_softc * tap_clone_creator(int);
238 int tap_clone_destroyer(device_t);
239
240 static u_int tap_count;
241
242 void
243 tapattach(int n)
244 {
245
246 /*
247 * Nothing to do here, initialization is handled by the
248 * module initialization code in tapinit() below.
249 */
250 }
251
252 static void
253 tapinit(void)
254 {
255 if_clone_attach(&tap_cloners);
256 }
257
258 static int
259 tapdetach(void)
260 {
261 int error = 0;
262
263 if (tap_count != 0)
264 error = EBUSY;
265
266 if (error == 0)
267 if_clone_detach(&tap_cloners);
268
269 return error;
270 }
271
272 /* Pretty much useless for a pseudo-device */
273 static int
274 tap_match(device_t parent, cfdata_t cfdata, void *arg)
275 {
276
277 return (1);
278 }
279
280 void
281 tap_attach(device_t parent, device_t self, void *aux)
282 {
283 struct tap_softc *sc = device_private(self);
284 struct ifnet *ifp;
285 #if defined(COMPAT_40) || defined(MODULAR)
286 const struct sysctlnode *node;
287 int error;
288 #endif
289 uint8_t enaddr[ETHER_ADDR_LEN] =
290 { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
291 char enaddrstr[3 * ETHER_ADDR_LEN];
292
293 sc->sc_dev = self;
294 sc->sc_sih = NULL;
295 getnanotime(&sc->sc_btime);
296 sc->sc_atime = sc->sc_mtime = sc->sc_btime;
297 sc->sc_flags = 0;
298 selinit(&sc->sc_rsel);
299
300 /*
301 * Initialize the two locks for the device.
302 *
303 * We need a lock here because even though the tap device can be
304 * opened only once, the file descriptor might be passed to another
305 * process, say a fork(2)ed child.
306 *
307 * The Giant saves us from most of the hassle, but since the read
308 * operation can sleep, we don't want two processes to wake up at
309 * the same moment and both try and dequeue a single packet.
310 *
311 * The queue for event listeners (used by kqueue(9), see below) has
312 * to be protected too, so use a spin lock.
313 */
314 mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE);
315 mutex_init(&sc->sc_kqlock, MUTEX_DEFAULT, IPL_VM);
316
317 if (!pmf_device_register(self, NULL, NULL))
318 aprint_error_dev(self, "couldn't establish power handler\n");
319
320 /*
321 * In order to obtain a unique initial Ethernet address on a host,
322 * do some randomisation. It's not meant for anything but avoiding
323 * hard-coding an address.
324 */
325 cprng_fast(&enaddr[3], 3);
326
327 aprint_verbose_dev(self, "Ethernet address %s\n",
328 ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));
329
330 /*
331 * Why 1000baseT? Why not? You can add more.
332 *
333 * Note that there are 3 steps: init, one or several additions to
334 * list of supported media, and in the end, the selection of one
335 * of them.
336 */
337 ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
338 ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
339 ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
340 ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
341 ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
342 ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
343 ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
344 ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
345 ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);
346
347 /*
348 * One should note that an interface must do multicast in order
349 * to support IPv6.
350 */
351 ifp = &sc->sc_ec.ec_if;
352 strcpy(ifp->if_xname, device_xname(self));
353 ifp->if_softc = sc;
354 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
355 ifp->if_ioctl = tap_ioctl;
356 ifp->if_start = tap_start;
357 ifp->if_stop = tap_stop;
358 ifp->if_init = tap_init;
359 IFQ_SET_READY(&ifp->if_snd);
360
361 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
362
363 /* Those steps are mandatory for an Ethernet driver. */
364 if_initialize(ifp);
365 ether_ifattach(ifp, enaddr);
366 if_register(ifp);
367
368 #if defined(COMPAT_40) || defined(MODULAR)
369 /*
370 * Add a sysctl node for that interface.
371 *
372 * The pointer transmitted is not a string, but instead a pointer to
373 * the softc structure, which we can use to build the string value on
374 * the fly in the helper function of the node. See the comments for
375 * tap_sysctl_handler for details.
376 *
377 * Usually sysctl_createv is called with CTL_CREATE as the next-to-last
378 * component. However, we can allocate a number ourselves, as we are
379 * the only consumer of the net.link.<iface> node. In this case, the
380 * unit number is conveniently used to number the node. CTL_CREATE
381 * would just work, too.
382 */
383 if ((error = sysctl_createv(NULL, 0, NULL,
384 &node, CTLFLAG_READWRITE,
385 CTLTYPE_STRING, device_xname(self), NULL,
386 tap_sysctl_handler, 0, (void *)sc, 18,
387 CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev),
388 CTL_EOL)) != 0)
389 aprint_error_dev(self, "sysctl_createv returned %d, ignoring\n",
390 error);
391 #endif
392 }
393
394 /*
395 * When detaching, we do the inverse of what is done in the attach
396 * routine, in reversed order.
397 */
398 static int
399 tap_detach(device_t self, int flags)
400 {
401 struct tap_softc *sc = device_private(self);
402 struct ifnet *ifp = &sc->sc_ec.ec_if;
403 #if defined(COMPAT_40) || defined(MODULAR)
404 int error;
405 #endif
406 int s;
407
408 sc->sc_flags |= TAP_GOING;
409 s = splnet();
410 tap_stop(ifp, 1);
411 if_down(ifp);
412 splx(s);
413
414 if (sc->sc_sih != NULL) {
415 softint_disestablish(sc->sc_sih);
416 sc->sc_sih = NULL;
417 }
418
419 #if defined(COMPAT_40) || defined(MODULAR)
420 /*
421 * Destroying a single leaf is a very straightforward operation using
422 * sysctl_destroyv. One should be sure to always end the path with
423 * CTL_EOL.
424 */
425 if ((error = sysctl_destroyv(NULL, CTL_NET, AF_LINK, tap_node,
426 device_unit(sc->sc_dev), CTL_EOL)) != 0)
427 aprint_error_dev(self,
428 "sysctl_destroyv returned %d, ignoring\n", error);
429 #endif
430 ether_ifdetach(ifp);
431 if_detach(ifp);
432 ifmedia_delete_instance(&sc->sc_im, IFM_INST_ANY);
433 seldestroy(&sc->sc_rsel);
434 mutex_destroy(&sc->sc_rdlock);
435 mutex_destroy(&sc->sc_kqlock);
436
437 pmf_device_deregister(self);
438
439 return (0);
440 }
441
442 /*
443 * This function is called by the ifmedia layer to notify the driver
444 * that the user requested a media change. A real driver would
445 * reconfigure the hardware.
446 */
447 static int
448 tap_mediachange(struct ifnet *ifp)
449 {
450 return (0);
451 }
452
453 /*
454 * Here the user asks for the currently used media.
455 */
456 static void
457 tap_mediastatus(struct ifnet *ifp, struct ifmediareq *imr)
458 {
459 struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
460 imr->ifm_active = sc->sc_im.ifm_cur->ifm_media;
461 }
462
463 /*
464 * This is the function where we SEND packets.
465 *
466 * There is no 'receive' equivalent. A typical driver will get
467 * interrupts from the hardware, and from there will inject new packets
468 * into the network stack.
469 *
470 * Once handled, a packet must be freed. A real driver might not be able
471 * to fit all the pending packets into the hardware, and is allowed to
472 * return before having sent all the packets. It should then use the
473 * if_flags flag IFF_OACTIVE to notify the upper layer.
474 *
475 * There are also other flags one should check, such as IFF_PAUSE.
476 *
477 * It is our duty to make packets available to BPF listeners.
478 *
479 * You should be aware that this function is called by the Ethernet layer
480 * at splnet().
481 *
482 * When the device is opened, we have to pass the packet(s) to the
483 * userland. For that we stay in OACTIVE mode while the userland gets
484 * the packets, and we send a signal to the processes waiting to read.
485 *
486 * wakeup(sc) is the counterpart to the tsleep call in
487 * tap_dev_read, while selnotify() is used for kevent(2) and
488 * poll(2) (which includes select(2)) listeners.
489 */
490 static void
491 tap_start(struct ifnet *ifp)
492 {
493 struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
494 struct mbuf *m0;
495
496 if ((sc->sc_flags & TAP_INUSE) == 0) {
497 /* Simply drop packets */
498 for(;;) {
499 IFQ_DEQUEUE(&ifp->if_snd, m0);
500 if (m0 == NULL)
501 return;
502
503 ifp->if_opackets++;
504 bpf_mtap(ifp, m0);
505
506 m_freem(m0);
507 }
508 } else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
509 ifp->if_flags |= IFF_OACTIVE;
510 wakeup(sc);
511 selnotify(&sc->sc_rsel, 0, 1);
512 if (sc->sc_flags & TAP_ASYNCIO)
513 softint_schedule(sc->sc_sih);
514 }
515 }
516
517 static void
518 tap_softintr(void *cookie)
519 {
520 struct tap_softc *sc;
521 struct ifnet *ifp;
522 int a, b;
523
524 sc = cookie;
525
526 if (sc->sc_flags & TAP_ASYNCIO) {
527 ifp = &sc->sc_ec.ec_if;
528 if (ifp->if_flags & IFF_RUNNING) {
529 a = POLL_IN;
530 b = POLLIN|POLLRDNORM;
531 } else {
532 a = POLL_HUP;
533 b = 0;
534 }
535 fownsignal(sc->sc_pgid, SIGIO, a, b, NULL);
536 }
537 }
538
539 /*
540 * A typical driver will only contain the following handlers for
541 * ioctl calls, except SIOCSIFPHYADDR.
542 * The latter is a hack I used to set the Ethernet address of the
543 * faked device.
544 *
545 * Note that both ifmedia_ioctl() and ether_ioctl() have to be
546 * called under splnet().
547 */
548 static int
549 tap_ioctl(struct ifnet *ifp, u_long cmd, void *data)
550 {
551 struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
552 struct ifreq *ifr = (struct ifreq *)data;
553 int s, error;
554
555 s = splnet();
556
557 switch (cmd) {
558 #ifdef OSIOCSIFMEDIA
559 case OSIOCSIFMEDIA:
560 #endif
561 case SIOCSIFMEDIA:
562 case SIOCGIFMEDIA:
563 error = ifmedia_ioctl(ifp, ifr, &sc->sc_im, cmd);
564 break;
565 #if defined(COMPAT_40) || defined(MODULAR)
566 case SIOCSIFPHYADDR:
567 error = tap_lifaddr(ifp, cmd, (struct ifaliasreq *)data);
568 break;
569 #endif
570 default:
571 error = ether_ioctl(ifp, cmd, data);
572 if (error == ENETRESET)
573 error = 0;
574 break;
575 }
576
577 splx(s);
578
579 return (error);
580 }
581
582 #if defined(COMPAT_40) || defined(MODULAR)
583 /*
584 * Helper function to set Ethernet address. This has been replaced by
585 * the generic SIOCALIFADDR ioctl on a PF_LINK socket.
586 */
587 static int
588 tap_lifaddr(struct ifnet *ifp, u_long cmd, struct ifaliasreq *ifra)
589 {
590 const struct sockaddr *sa = &ifra->ifra_addr;
591
592 if (sa->sa_family != AF_LINK)
593 return (EINVAL);
594
595 if_set_sadl(ifp, sa->sa_data, ETHER_ADDR_LEN, false);
596
597 return (0);
598 }
599 #endif
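/*
 * For illustration only: with the generic interface mentioned above, the
 * link-layer address would be set from userland through SIOCALIFADDR on a
 * PF_LINK socket, typically via something like
 * "ifconfig tap0 link f2:0b:a4:00:00:01 active" (exact syntax assumed).
 */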
600
601 /*
602 * _init() would typically be called when an interface goes up,
603 * meaning it should configure itself into the state in which it
604 * can send packets.
605 */
606 static int
607 tap_init(struct ifnet *ifp)
608 {
609 ifp->if_flags |= IFF_RUNNING;
610
611 tap_start(ifp);
612
613 return (0);
614 }
615
616 /*
617 * _stop() is called when an interface goes down. It is our
618 * responsibility to validate that state by clearing the
619 * IFF_RUNNING flag.
620 *
621 * We have to wake up all the sleeping processes to have the pending
622 * read requests cancelled.
623 */
624 static void
625 tap_stop(struct ifnet *ifp, int disable)
626 {
627 struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
628
629 ifp->if_flags &= ~IFF_RUNNING;
630 wakeup(sc);
631 selnotify(&sc->sc_rsel, 0, 1);
632 if (sc->sc_flags & TAP_ASYNCIO)
633 softint_schedule(sc->sc_sih);
634 }
635
636 /*
637 * The 'create' command of ifconfig can be used to create
638 * any numbered instance of a given device. Thus we have to
639 * make sure we have enough room in cd_devs to create the
640 * user-specified instance. config_attach_pseudo will do this
641 * for us.
642 */
643 static int
644 tap_clone_create(struct if_clone *ifc, int unit)
645 {
646 if (tap_clone_creator(unit) == NULL) {
647 aprint_error("%s%d: unable to attach an instance\n",
648 tap_cd.cd_name, unit);
649 return (ENXIO);
650 }
651 atomic_inc_uint(&tap_count);
652 return (0);
653 }
654
655 /*
656 * tap(4) can be cloned in two ways:
657 * using 'ifconfig tap0 create', which will use the network
658 * interface cloning API, and call tap_clone_create above.
659 * opening the cloning device node, whose minor number is TAP_CLONER.
660 * See below for an explanation of how this part works.
661 */
662 static struct tap_softc *
663 tap_clone_creator(int unit)
664 {
665 struct cfdata *cf;
666
667 cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
668 cf->cf_name = tap_cd.cd_name;
669 cf->cf_atname = tap_ca.ca_name;
670 if (unit == -1) {
671 /* let autoconf find the first free one */
672 cf->cf_unit = 0;
673 cf->cf_fstate = FSTATE_STAR;
674 } else {
675 cf->cf_unit = unit;
676 cf->cf_fstate = FSTATE_NOTFOUND;
677 }
678
679 return device_private(config_attach_pseudo(cf));
680 }
681
682 /*
683 * The clean design of if_clone and autoconf(9) makes that part
684 * really straightforward. The second argument of config_detach
685 * means neither QUIET nor FORCED.
686 */
687 static int
688 tap_clone_destroy(struct ifnet *ifp)
689 {
690 struct tap_softc *sc = ifp->if_softc;
691 int error = tap_clone_destroyer(sc->sc_dev);
692
693 if (error == 0)
694 atomic_dec_uint(&tap_count);
695 return error;
696 }
697
698 int
699 tap_clone_destroyer(device_t dev)
700 {
701 cfdata_t cf = device_cfdata(dev);
702 int error;
703
704 if ((error = config_detach(dev, 0)) != 0)
705 aprint_error_dev(dev, "unable to detach instance\n");
706 free(cf, M_DEVBUF);
707
708 return (error);
709 }
710
711 /*
712 * tap(4) is a bit of a hybrid device. It can be used in two different
713 * ways:
714 * 1. ifconfig tapN create, then use /dev/tapN to read/write off it.
715 * 2. open /dev/tap, get a new interface created and read/write off it.
716 * That interface is destroyed when the process that created it exits.
717 *
718 * The first way is managed by the cdevsw structure, and you access interfaces
719 * through a (major, minor) mapping: tap4 is obtained by the minor number
720 * 4. The entry points for the cdevsw interface are prefixed by tap_cdev_.
721 *
722 * The second way is the so-called "cloning" device. It's a special minor
723 * number (chosen as the maximal number, to allow as many tap devices as
724 * possible). The user first opens the cloner (e.g., /dev/tap), and that
725 * call ends in tap_cdev_open. The actual place where it is handled is
726 * tap_dev_cloner.
727 *
728 * A tap device cannot be opened more than once at a time, so the cdevsw
729 * part of open() does nothing but note that the interface is being used and
730 * hence ready to actually handle packets.
731 */
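/*
 * A short sketch of the cloning path from userland (illustrative only,
 * error handling omitted): opening the cloner yields a descriptor bound to
 * a freshly created interface, whose name can then be recovered with the
 * TAPGIFNAME ioctl handled below:
 *
 *	int fd = open("/dev/tap", O_RDWR);
 *	struct ifreq ifr;
 *	ioctl(fd, TAPGIFNAME, &ifr);	// e.g. "tap3" in ifr.ifr_name
 */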
732
733 static int
734 tap_cdev_open(dev_t dev, int flags, int fmt, struct lwp *l)
735 {
736 struct tap_softc *sc;
737
738 if (minor(dev) == TAP_CLONER)
739 return tap_dev_cloner(l);
740
741 sc = device_lookup_private(&tap_cd, minor(dev));
742 if (sc == NULL)
743 return (ENXIO);
744
745 /* The device can only be opened once */
746 if (sc->sc_flags & TAP_INUSE)
747 return (EBUSY);
748 sc->sc_flags |= TAP_INUSE;
749 return (0);
750 }
751
752 /*
753 * There are several kinds of cloning devices, and the simplest is the one
754 * tap(4) uses. What it does is replace the file descriptor with a new one,
755 * with its own fileops structure (which maps to the various read, write,
756 * ioctl functions). It starts by allocating a new file descriptor with
757 * fd_allocfile, then actually creates the new tap device.
758 *
759 * Once those two steps are successful, we can re-wire the existing file
760 * descriptor to its new self. This is done with fd_clone(): it fills the fp
761 * structure as needed (notably f_devunit gets filled with the fifth parameter
762 * passed, the unit of the tap device, which will allow us to identify the
763 * device later), and returns EMOVEFD.
764 *
765 * That magic value is interpreted by sys_open() which then replaces the
766 * current file descriptor by the new one (through a magic member of struct
767 * lwp, l_dupfd).
768 *
769 * The tap device is flagged as being busy since it otherwise could be
770 * externally accessed through the corresponding device node with the cdevsw
771 * interface.
772 */
773
774 static int
775 tap_dev_cloner(struct lwp *l)
776 {
777 struct tap_softc *sc;
778 file_t *fp;
779 int error, fd;
780
781 if ((error = fd_allocfile(&fp, &fd)) != 0)
782 return (error);
783
784 if ((sc = tap_clone_creator(-1)) == NULL) {
785 fd_abort(curproc, fp, fd);
786 return (ENXIO);
787 }
788
789 sc->sc_flags |= TAP_INUSE;
790
791 return fd_clone(fp, fd, FREAD|FWRITE, &tap_fileops,
792 (void *)(intptr_t)device_unit(sc->sc_dev));
793 }
794
795 /*
796 * While all other operations (read, write, ioctl, poll and kqfilter) are
797 * really the same whether we are in cdevsw or fileops mode, the close()
798 * function is slightly different in the two cases.
799 *
800 * As with the others, the core of it is shared in tap_dev_close. What
801 * it does is sufficient for the cdevsw interface, but the cloning interface
802 * needs one more thing: the interface is destroyed when the process that
803 * created it closes it.
804 */
805 static int
806 tap_cdev_close(dev_t dev, int flags, int fmt,
807 struct lwp *l)
808 {
809 struct tap_softc *sc =
810 device_lookup_private(&tap_cd, minor(dev));
811
812 if (sc == NULL)
813 return (ENXIO);
814
815 return tap_dev_close(sc);
816 }
817
818 /*
819 * It might happen that the administrator used ifconfig to externally destroy
820 * the interface. In that case, tap_fops_close will be called while
821 * tap_detach is already happening. If we called it again from here, we
822 * would deadlock. TAP_GOING ensures that this situation doesn't happen.
823 */
824 static int
825 tap_fops_close(file_t *fp)
826 {
827 int unit = fp->f_devunit;
828 struct tap_softc *sc;
829 int error;
830
831 sc = device_lookup_private(&tap_cd, unit);
832 if (sc == NULL)
833 return (ENXIO);
834
835 /* tap_dev_close currently always succeeds, but that might not
836 * always be the case. */
837 KERNEL_LOCK(1, NULL);
838 if ((error = tap_dev_close(sc)) != 0) {
839 KERNEL_UNLOCK_ONE(NULL);
840 return (error);
841 }
842
843 /* Destroy the device now that it is no longer useful,
844 * unless it's already being destroyed. */
845 if ((sc->sc_flags & TAP_GOING) != 0) {
846 KERNEL_UNLOCK_ONE(NULL);
847 return (0);
848 }
849
850 error = tap_clone_destroyer(sc->sc_dev);
851 KERNEL_UNLOCK_ONE(NULL);
852 return error;
853 }
854
855 static int
856 tap_dev_close(struct tap_softc *sc)
857 {
858 struct ifnet *ifp;
859 int s;
860
861 s = splnet();
862 /* Let tap_start handle packets again */
863 ifp = &sc->sc_ec.ec_if;
864 ifp->if_flags &= ~IFF_OACTIVE;
865
866 /* Purge output queue */
867 if (!(IFQ_IS_EMPTY(&ifp->if_snd))) {
868 struct mbuf *m;
869
870 for (;;) {
871 IFQ_DEQUEUE(&ifp->if_snd, m);
872 if (m == NULL)
873 break;
874
875 ifp->if_opackets++;
876 bpf_mtap(ifp, m);
877 m_freem(m);
878 }
879 }
880 splx(s);
881
882 if (sc->sc_sih != NULL) {
883 softint_disestablish(sc->sc_sih);
884 sc->sc_sih = NULL;
885 }
886 sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO);
887
888 return (0);
889 }
890
891 static int
892 tap_cdev_read(dev_t dev, struct uio *uio, int flags)
893 {
894 return tap_dev_read(minor(dev), uio, flags);
895 }
896
897 static int
898 tap_fops_read(file_t *fp, off_t *offp, struct uio *uio,
899 kauth_cred_t cred, int flags)
900 {
901 int error;
902
903 KERNEL_LOCK(1, NULL);
904 error = tap_dev_read(fp->f_devunit, uio, flags);
905 KERNEL_UNLOCK_ONE(NULL);
906 return error;
907 }
908
909 static int
910 tap_dev_read(int unit, struct uio *uio, int flags)
911 {
912 struct tap_softc *sc = device_lookup_private(&tap_cd, unit);
913 struct ifnet *ifp;
914 struct mbuf *m, *n;
915 int error = 0, s;
916
917 if (sc == NULL)
918 return (ENXIO);
919
920 getnanotime(&sc->sc_atime);
921
922 ifp = &sc->sc_ec.ec_if;
923 if ((ifp->if_flags & IFF_UP) == 0)
924 return (EHOSTDOWN);
925
926 /*
927 * In the TAP_NBIO case, we have to make sure we won't be sleeping
928 */
929 if ((sc->sc_flags & TAP_NBIO) != 0) {
930 if (!mutex_tryenter(&sc->sc_rdlock))
931 return (EWOULDBLOCK);
932 } else {
933 mutex_enter(&sc->sc_rdlock);
934 }
935
936 s = splnet();
937 if (IFQ_IS_EMPTY(&ifp->if_snd)) {
938 ifp->if_flags &= ~IFF_OACTIVE;
939 /*
940 * We must release the lock before sleeping, and re-acquire it
941 * after.
942 */
943 mutex_exit(&sc->sc_rdlock);
944 if (sc->sc_flags & TAP_NBIO)
945 error = EWOULDBLOCK;
946 else
947 error = tsleep(sc, PSOCK|PCATCH, "tap", 0);
948 splx(s);
949
950 if (error != 0)
951 return (error);
952 /* The device might have been downed */
953 if ((ifp->if_flags & IFF_UP) == 0)
954 return (EHOSTDOWN);
955 if ((sc->sc_flags & TAP_NBIO)) {
956 if (!mutex_tryenter(&sc->sc_rdlock))
957 return (EWOULDBLOCK);
958 } else {
959 mutex_enter(&sc->sc_rdlock);
960 }
961 s = splnet();
962 }
963
964 IFQ_DEQUEUE(&ifp->if_snd, m);
965 ifp->if_flags &= ~IFF_OACTIVE;
966 splx(s);
967 if (m == NULL) {
968 error = 0;
969 goto out;
970 }
971
972 ifp->if_opackets++;
973 bpf_mtap(ifp, m);
974
975 /*
976 * One read is one packet.
977 */
978 do {
979 error = uiomove(mtod(m, void *),
980 min(m->m_len, uio->uio_resid), uio);
981 MFREE(m, n);
982 m = n;
983 } while (m != NULL && uio->uio_resid > 0 && error == 0);
984
985 if (m != NULL)
986 m_freem(m);
987
988 out:
989 mutex_exit(&sc->sc_rdlock);
990 return (error);
991 }
992
993 static int
994 tap_fops_stat(file_t *fp, struct stat *st)
995 {
996 int error = 0;
997 struct tap_softc *sc;
998 int unit = fp->f_devunit;
999
1000 (void)memset(st, 0, sizeof(*st));
1001
1002 KERNEL_LOCK(1, NULL);
1003 sc = device_lookup_private(&tap_cd, unit);
1004 if (sc == NULL) {
1005 error = ENXIO;
1006 goto out;
1007 }
1008
1009 st->st_dev = makedev(cdevsw_lookup_major(&tap_cdevsw), unit);
1010 st->st_atimespec = sc->sc_atime;
1011 st->st_mtimespec = sc->sc_mtime;
1012 st->st_ctimespec = st->st_birthtimespec = sc->sc_btime;
1013 st->st_uid = kauth_cred_geteuid(fp->f_cred);
1014 st->st_gid = kauth_cred_getegid(fp->f_cred);
1015 out:
1016 KERNEL_UNLOCK_ONE(NULL);
1017 return error;
1018 }
1019
1020 static int
1021 tap_cdev_write(dev_t dev, struct uio *uio, int flags)
1022 {
1023 return tap_dev_write(minor(dev), uio, flags);
1024 }
1025
1026 static int
1027 tap_fops_write(file_t *fp, off_t *offp, struct uio *uio,
1028 kauth_cred_t cred, int flags)
1029 {
1030 int error;
1031
1032 KERNEL_LOCK(1, NULL);
1033 error = tap_dev_write(fp->f_devunit, uio, flags);
1034 KERNEL_UNLOCK_ONE(NULL);
1035 return error;
1036 }
1037
1038 static int
1039 tap_dev_write(int unit, struct uio *uio, int flags)
1040 {
1041 struct tap_softc *sc =
1042 device_lookup_private(&tap_cd, unit);
1043 struct ifnet *ifp;
1044 struct mbuf *m, **mp;
1045 int error = 0;
1046 int s;
1047
1048 if (sc == NULL)
1049 return (ENXIO);
1050
1051 getnanotime(&sc->sc_mtime);
1052 ifp = &sc->sc_ec.ec_if;
1053
1054 /* One write, one packet, that's the rule */
1055 MGETHDR(m, M_DONTWAIT, MT_DATA);
1056 if (m == NULL) {
1057 ifp->if_ierrors++;
1058 return (ENOBUFS);
1059 }
1060 m->m_pkthdr.len = uio->uio_resid;
1061
1062 mp = &m;
1063 while (error == 0 && uio->uio_resid > 0) {
1064 if (*mp != m) {
1065 MGET(*mp, M_DONTWAIT, MT_DATA);
1066 if (*mp == NULL) {
1067 error = ENOBUFS;
1068 break;
1069 }
1070 }
1071 (*mp)->m_len = min(MHLEN, uio->uio_resid);
1072 error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
1073 mp = &(*mp)->m_next;
1074 }
1075 if (error) {
1076 ifp->if_ierrors++;
1077 m_freem(m);
1078 return (error);
1079 }
1080
1081 ifp->if_ipackets++;
1082 m_set_rcvif(m, ifp);
1083
1084 bpf_mtap(ifp, m);
1085 s = splnet();
1086 if_input(ifp, m);
1087 splx(s);
1088
1089 return (0);
1090 }
1091
1092 static int
1093 tap_cdev_ioctl(dev_t dev, u_long cmd, void *data, int flags,
1094 struct lwp *l)
1095 {
1096 return tap_dev_ioctl(minor(dev), cmd, data, l);
1097 }
1098
1099 static int
1100 tap_fops_ioctl(file_t *fp, u_long cmd, void *data)
1101 {
1102 return tap_dev_ioctl(fp->f_devunit, cmd, data, curlwp);
1103 }
1104
1105 static int
1106 tap_dev_ioctl(int unit, u_long cmd, void *data, struct lwp *l)
1107 {
1108 struct tap_softc *sc = device_lookup_private(&tap_cd, unit);
1109
1110 if (sc == NULL)
1111 return ENXIO;
1112
1113 switch (cmd) {
1114 case FIONREAD:
1115 {
1116 struct ifnet *ifp = &sc->sc_ec.ec_if;
1117 struct mbuf *m;
1118 int s;
1119
1120 s = splnet();
1121 IFQ_POLL(&ifp->if_snd, m);
1122
1123 if (m == NULL)
1124 *(int *)data = 0;
1125 else
1126 *(int *)data = m->m_pkthdr.len;
1127 splx(s);
1128 return 0;
1129 }
1130 case TIOCSPGRP:
1131 case FIOSETOWN:
1132 return fsetown(&sc->sc_pgid, cmd, data);
1133 case TIOCGPGRP:
1134 case FIOGETOWN:
1135 return fgetown(sc->sc_pgid, cmd, data);
1136 case FIOASYNC:
1137 if (*(int *)data) {
1138 if (sc->sc_sih == NULL) {
1139 sc->sc_sih = softint_establish(SOFTINT_CLOCK,
1140 tap_softintr, sc);
1141 if (sc->sc_sih == NULL)
1142 return EBUSY; /* XXX */
1143 }
1144 sc->sc_flags |= TAP_ASYNCIO;
1145 } else {
1146 sc->sc_flags &= ~TAP_ASYNCIO;
1147 if (sc->sc_sih != NULL) {
1148 softint_disestablish(sc->sc_sih);
1149 sc->sc_sih = NULL;
1150 }
1151 }
1152 return 0;
1153 case FIONBIO:
1154 if (*(int *)data)
1155 sc->sc_flags |= TAP_NBIO;
1156 else
1157 sc->sc_flags &= ~TAP_NBIO;
1158 return 0;
1159 #ifdef OTAPGIFNAME
1160 case OTAPGIFNAME:
1161 #endif
1162 case TAPGIFNAME:
1163 {
1164 struct ifreq *ifr = (struct ifreq *)data;
1165 struct ifnet *ifp = &sc->sc_ec.ec_if;
1166
1167 strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
1168 return 0;
1169 }
1170 default:
1171 return ENOTTY;
1172 }
1173 }
1174
1175 static int
1176 tap_cdev_poll(dev_t dev, int events, struct lwp *l)
1177 {
1178 return tap_dev_poll(minor(dev), events, l);
1179 }
1180
1181 static int
1182 tap_fops_poll(file_t *fp, int events)
1183 {
1184 return tap_dev_poll(fp->f_devunit, events, curlwp);
1185 }
1186
1187 static int
1188 tap_dev_poll(int unit, int events, struct lwp *l)
1189 {
1190 struct tap_softc *sc =
1191 device_lookup_private(&tap_cd, unit);
1192 int revents = 0;
1193
1194 if (sc == NULL)
1195 return POLLERR;
1196
1197 if (events & (POLLIN|POLLRDNORM)) {
1198 struct ifnet *ifp = &sc->sc_ec.ec_if;
1199 struct mbuf *m;
1200 int s;
1201
1202 s = splnet();
1203 IFQ_POLL(&ifp->if_snd, m);
1204
1205 if (m != NULL)
1206 revents |= events & (POLLIN|POLLRDNORM);
1207 else {
1208 mutex_spin_enter(&sc->sc_kqlock);
1209 selrecord(l, &sc->sc_rsel);
1210 mutex_spin_exit(&sc->sc_kqlock);
1211 }
1212 splx(s);
1213 }
1214 revents |= events & (POLLOUT|POLLWRNORM);
1215
1216 return (revents);
1217 }
1218
1219 static struct filterops tap_read_filterops = { 1, NULL, tap_kqdetach,
1220 tap_kqread };
1221 static struct filterops tap_seltrue_filterops = { 1, NULL, tap_kqdetach,
1222 filt_seltrue };
1223
1224 static int
1225 tap_cdev_kqfilter(dev_t dev, struct knote *kn)
1226 {
1227 return tap_dev_kqfilter(minor(dev), kn);
1228 }
1229
1230 static int
1231 tap_fops_kqfilter(file_t *fp, struct knote *kn)
1232 {
1233 return tap_dev_kqfilter(fp->f_devunit, kn);
1234 }
1235
1236 static int
1237 tap_dev_kqfilter(int unit, struct knote *kn)
1238 {
1239 struct tap_softc *sc =
1240 device_lookup_private(&tap_cd, unit);
1241
1242 if (sc == NULL)
1243 return (ENXIO);
1244
1245 KERNEL_LOCK(1, NULL);
1246 switch(kn->kn_filter) {
1247 case EVFILT_READ:
1248 kn->kn_fop = &tap_read_filterops;
1249 break;
1250 case EVFILT_WRITE:
1251 kn->kn_fop = &tap_seltrue_filterops;
1252 break;
1253 default:
1254 KERNEL_UNLOCK_ONE(NULL);
1255 return (EINVAL);
1256 }
1257
1258 kn->kn_hook = sc;
1259 mutex_spin_enter(&sc->sc_kqlock);
1260 SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
1261 mutex_spin_exit(&sc->sc_kqlock);
1262 KERNEL_UNLOCK_ONE(NULL);
1263 return (0);
1264 }
1265
1266 static void
1267 tap_kqdetach(struct knote *kn)
1268 {
1269 struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
1270
1271 KERNEL_LOCK(1, NULL);
1272 mutex_spin_enter(&sc->sc_kqlock);
1273 SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
1274 mutex_spin_exit(&sc->sc_kqlock);
1275 KERNEL_UNLOCK_ONE(NULL);
1276 }
1277
1278 static int
1279 tap_kqread(struct knote *kn, long hint)
1280 {
1281 struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
1282 struct ifnet *ifp = &sc->sc_ec.ec_if;
1283 struct mbuf *m;
1284 int s, rv;
1285
1286 KERNEL_LOCK(1, NULL);
1287 s = splnet();
1288 IFQ_POLL(&ifp->if_snd, m);
1289
1290 if (m == NULL)
1291 kn->kn_data = 0;
1292 else
1293 kn->kn_data = m->m_pkthdr.len;
1294 splx(s);
1295 rv = (kn->kn_data != 0 ? 1 : 0);
1296 KERNEL_UNLOCK_ONE(NULL);
1297 return rv;
1298 }
1299
1300 #if defined(COMPAT_40) || defined(MODULAR)
1301 /*
1302 * sysctl management routines
1303 * You can set the address of an interface through:
1304 * net.link.tap.tap<number>
1305 *
1306 * Note the consistent use of tap_log in order to use
1307 * sysctl_teardown at unload time.
1308 *
1309 * In the kernel you will find a lot of SYSCTL_SETUP blocks. Those
1310 * blocks register a function in a special section of the kernel
1311 * (called a link set) which is used at init_sysctl() time to cycle
1312 * through all those functions to create the kernel's sysctl tree.
1313 *
1314 * It is not possible to use link sets in a module, so the
1315 * easiest approach is simply to call our own setup routine at load time.
1316 *
1317 * In the SYSCTL_SETUP blocks you find in the kernel, nodes have the
1318 * CTLFLAG_PERMANENT flag, meaning they cannot be removed. Once the
1319 * whole kernel sysctl tree is built, it is not possible to add any
1320 * permanent node.
1321 *
1322 * It should be noted that we're not saving the sysctlnode pointer
1323 * we are returned when creating the "tap" node. That structure
1324 * cannot be trusted once out of the calling function, as it might
1325 * get reused. So we just save the MIB number, and always give the
1326 * full path starting from the root for later calls to sysctl_createv
1327 * and sysctl_destroyv.
1328 */
1329 SYSCTL_SETUP(sysctl_tap_setup, "sysctl net.link.tap subtree setup")
1330 {
1331 const struct sysctlnode *node;
1332 int error = 0;
1333
1334 if ((error = sysctl_createv(clog, 0, NULL, NULL,
1335 CTLFLAG_PERMANENT,
1336 CTLTYPE_NODE, "link", NULL,
1337 NULL, 0, NULL, 0,
1338 CTL_NET, AF_LINK, CTL_EOL)) != 0)
1339 return;
1340
1341 /*
1342 * The first four parameters of sysctl_createv are for management.
1343 *
1344 * The four that follow, here starting with the flags,
1345 * describe the node.
1346 *
1347 * The next series of four set its value, through various possible
1348 * means.
1349 *
1350 * Last but not least, the path to the node is described. That path
1351 * is relative to the given root (third argument). Here we're
1352 * starting from the root.
1353 */
1354 if ((error = sysctl_createv(clog, 0, NULL, &node,
1355 CTLFLAG_PERMANENT,
1356 CTLTYPE_NODE, "tap", NULL,
1357 NULL, 0, NULL, 0,
1358 CTL_NET, AF_LINK, CTL_CREATE, CTL_EOL)) != 0)
1359 return;
1360 tap_node = node->sysctl_num;
1361 }
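/*
 * For illustration, once the per-interface node exists the address can be
 * read or changed from userland roughly as follows (interface name and
 * address are assumptions):
 *
 *	sysctl net.link.tap.tap0
 *	sysctl -w net.link.tap.tap0=f2:0b:a4:00:00:01
 */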
1362
1363 /*
1364 * The helper functions make Andrew Brown's interface really
1365 * shine. They make it possible to create the value on the fly, whether
1366 * the sysctl value is read or written.
1367 *
1368 * As shown as an example in the man page, the first step is to
1369 * create a copy of the node to have sysctl_lookup work on it.
1370 *
1371 * Here, we have more work to do than just a copy, since we have
1372 * to create the string. The first step is to collect the actual
1373 * value of the node, which is a convenient pointer to the softc
1374 * of the interface. From there we create the string and use it
1375 * as the value, but only for the *copy* of the node.
1376 *
1377 * Then we let sysctl_lookup do the magic, which consists in
1378 * setting oldp and newp as required by the operation. When the
1379 * value is read, that means that the string will be copied to
1380 * the user, and when it is written, the new value will be copied
1381 * over in the addr array.
1382 *
1383 * If newp is NULL, the user was reading the value, so we don't
1384 * have anything else to do. If a new value was written, we
1385 * have to check it.
1386 *
1387 * If it is incorrect, we can return an error and leave 'node' as
1388 * it is: since it is a copy of the actual node, the change will
1389 * be forgotten.
1390 *
1391 * Upon a correct input, we commit the change to the ifnet
1392 * structure of our interface.
1393 */
1394 static int
1395 tap_sysctl_handler(SYSCTLFN_ARGS)
1396 {
1397 struct sysctlnode node;
1398 struct tap_softc *sc;
1399 struct ifnet *ifp;
1400 int error;
1401 size_t len;
1402 char addr[3 * ETHER_ADDR_LEN];
1403 uint8_t enaddr[ETHER_ADDR_LEN];
1404
1405 node = *rnode;
1406 sc = node.sysctl_data;
1407 ifp = &sc->sc_ec.ec_if;
1408 (void)ether_snprintf(addr, sizeof(addr), CLLADDR(ifp->if_sadl));
1409 node.sysctl_data = addr;
1410 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1411 if (error || newp == NULL)
1412 return (error);
1413
1414 len = strlen(addr);
1415 if (len < 11 || len > 17)
1416 return (EINVAL);
1417
1418 /* Commit change */
1419 if (ether_aton_r(enaddr, sizeof(enaddr), addr) != 0)
1420 return (EINVAL);
1421 if_set_sadl(ifp, enaddr, ETHER_ADDR_LEN, false);
1422 return (error);
1423 }
1424 #endif
1425
1426 /*
1427 * Module infrastructure
1428 */
1429 #include "if_module.h"
1430
1431 IF_MODULE(MODULE_CLASS_DRIVER, tap, "")
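/*
 * With the module framework the driver can also be loaded at run time,
 * e.g. with "modload if_tap" (module name assumed from the IF_MODULE
 * macro above); tapinit() then attaches the cloner, after which
 * "ifconfig tap0 create" works as usual.
 */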
1432