if_shmem.c revision 1.49 1 /* $NetBSD: if_shmem.c,v 1.49 2013/04/28 10:53:21 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2009, 2010 Antti Kantee. All Rights Reserved.
5 *
6 * Development of this software was supported by The Nokia Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: if_shmem.c,v 1.49 2013/04/28 10:53:21 pooka Exp $");
32
33 #include <sys/param.h>
34 #include <sys/atomic.h>
35 #include <sys/fcntl.h>
36 #include <sys/kmem.h>
37 #include <sys/kthread.h>
38 #include <sys/lock.h>
39 #include <sys/vmem.h>
40 #include <sys/cprng.h>
41
42 #include <net/bpf.h>
43 #include <net/if.h>
44 #include <net/if_dl.h>
45 #include <net/if_ether.h>
46
47 #include <netinet/in.h>
48 #include <netinet/in_var.h>
49
50 #include <rump/rump.h>
51 #include <rump/rumpuser.h>
52
53 #include "rump_private.h"
54 #include "rump_net_private.h"
55 #include "rumpcomp_user.h"
56
/* interface cloner hooks: create/destroy instances via the if_clone API */
static int shmif_clone(struct if_clone *, int);
static int shmif_unclone(struct ifnet *);

/* cloner descriptor registered by the rump net attach code */
struct if_clone shmif_cloner =
    IF_CLONE_INITIALIZER("shmif", shmif_clone, shmif_unclone);
62
/*
 * Do r/w prefault for backend pages when attaching the interface.
 * Logically this should improve performance (although no mlocking
 * is done, so the pages might still be paged out again).
 */
68 #define PREFAULT_RW
69
/*
 * A virtual ethernet interface which uses shared memory from a
 * memory mapped file as the bus.
 */

/* ifnet method implementations, defined below */
static int shmif_init(struct ifnet *);
static int shmif_ioctl(struct ifnet *, u_long, void *);
static void shmif_start(struct ifnet *);
static void shmif_stop(struct ifnet *, int);

#include "shmifvar.h"
81
/* per-interface softc */
struct shmif_sc {
	struct ethercom sc_ec;		/* ethernet common data; must be first */
	struct shmif_mem *sc_busmem;	/* mmapped shared-memory bus header */
	int sc_memfd;			/* fd of backing file, -1 if unattached */
	int sc_kq;			/* host handle for bus change watch */
	int sc_unit;			/* interface unit number */

	char *sc_backfile;		/* path of the backing file (kmem) */
	size_t sc_backfilelen;		/* allocated size of sc_backfile */

	uint64_t sc_devgen;		/* bus generation our read ptr is on */
	uint32_t sc_nextpacket;		/* bus offset of next packet to read */

	kmutex_t sc_mtx;		/* protects sc_cv / sc_dying handshake */
	kcondvar_t sc_cv;		/* wakes receiver on up/down/death */

	struct lwp *sc_rcvl;		/* receiver thread, see shmif_rcv() */
	bool sc_dying;			/* tells receiver thread to exit */
};
101
/* written to the bus file purely to wake up sleeping bus watchers */
static const uint32_t busversion = SHMIF_VERSION;

static void shmif_rcv(void *);

/* states of the shm_lock spinlock word in the shared bus header */
#define LOCK_UNLOCKED	0
#define LOCK_LOCKED	1
/* spins before backing off with a 1ms sleep in shmif_lockbus() */
#define LOCK_COOLDOWN	1001

/* unit number allocator, shared by cloner and wildcard attach */
vmem_t *shmif_units;
111
112 /*
113 * This locking needs work and will misbehave severely if:
114 * 1) the backing memory has to be paged in
115 * 2) some lockholder exits while holding the lock
116 */
117 static void
118 shmif_lockbus(struct shmif_mem *busmem)
119 {
120 int i = 0;
121
122 while (__predict_false(atomic_cas_32(&busmem->shm_lock,
123 LOCK_UNLOCKED, LOCK_LOCKED) == LOCK_LOCKED)) {
124 if (__predict_false(++i > LOCK_COOLDOWN)) {
125 uint64_t sec, nsec;
126 int error;
127
128 sec = 0;
129 nsec = 1000*1000; /* 1ms */
130 rumpuser_nanosleep(&sec, &nsec, &error);
131 i = 0;
132 }
133 continue;
134 }
135 membar_enter();
136 }
137
138 static void
139 shmif_unlockbus(struct shmif_mem *busmem)
140 {
141 unsigned int old;
142
143 membar_exit();
144 old = atomic_swap_32(&busmem->shm_lock, LOCK_UNLOCKED);
145 KASSERT(old == LOCK_LOCKED);
146 }
147
148 static int
149 allocif(int unit, struct shmif_sc **scp)
150 {
151 uint8_t enaddr[ETHER_ADDR_LEN] = { 0xb2, 0xa0, 0x00, 0x00, 0x00, 0x00 };
152 struct shmif_sc *sc;
153 struct ifnet *ifp;
154 uint32_t randnum;
155 int error;
156
157 randnum = cprng_fast32();
158 memcpy(&enaddr[2], &randnum, sizeof(randnum));
159
160 sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
161 sc->sc_memfd = -1;
162 sc->sc_unit = unit;
163
164 ifp = &sc->sc_ec.ec_if;
165
166 sprintf(ifp->if_xname, "shmif%d", unit);
167 ifp->if_softc = sc;
168 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
169 ifp->if_init = shmif_init;
170 ifp->if_ioctl = shmif_ioctl;
171 ifp->if_start = shmif_start;
172 ifp->if_stop = shmif_stop;
173 ifp->if_mtu = ETHERMTU;
174 ifp->if_dlt = DLT_EN10MB;
175
176 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
177 cv_init(&sc->sc_cv, "shmifcv");
178
179 if_attach(ifp);
180 ether_ifattach(ifp, enaddr);
181
182 aprint_verbose("shmif%d: Ethernet address %s\n",
183 unit, ether_sprintf(enaddr));
184
185 if (scp)
186 *scp = sc;
187
188 error = 0;
189 if (rump_threads) {
190 error = kthread_create(PRI_NONE,
191 KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
192 shmif_rcv, ifp, &sc->sc_rcvl, "shmif");
193 } else {
194 printf("WARNING: threads not enabled, shmif NOT working\n");
195 }
196
197 if (error) {
198 shmif_unclone(ifp);
199 }
200
201 return error;
202 }
203
204 static int
205 initbackend(struct shmif_sc *sc, int memfd)
206 {
207 volatile uint8_t v;
208 volatile uint8_t *p;
209 int error;
210
211 sc->sc_busmem = rumpuser_filemmap(memfd, 0, BUSMEM_SIZE,
212 RUMPUSER_FILEMMAP_TRUNCATE | RUMPUSER_FILEMMAP_SHARED
213 | RUMPUSER_FILEMMAP_READ | RUMPUSER_FILEMMAP_WRITE, &error);
214 if (error)
215 return error;
216
217 if (sc->sc_busmem->shm_magic
218 && sc->sc_busmem->shm_magic != SHMIF_MAGIC) {
219 printf("bus is not magical");
220 rumpuser_unmap(sc->sc_busmem, BUSMEM_SIZE);
221 return ENOEXEC;
222 }
223
224 /*
225 * Prefault in pages to minimize runtime penalty with buslock.
226 * Use 512 instead of PAGE_SIZE to make sure we catch cases where
227 * rump kernel PAGE_SIZE > host page size.
228 */
229 for (p = (uint8_t *)sc->sc_busmem;
230 p < (uint8_t *)sc->sc_busmem + BUSMEM_SIZE;
231 p += 512)
232 v = *p;
233
234 shmif_lockbus(sc->sc_busmem);
235 /* we're first? initialize bus */
236 if (sc->sc_busmem->shm_magic == 0) {
237 sc->sc_busmem->shm_magic = SHMIF_MAGIC;
238 sc->sc_busmem->shm_first = BUSMEM_DATASIZE;
239 }
240
241 sc->sc_nextpacket = sc->sc_busmem->shm_last;
242 sc->sc_devgen = sc->sc_busmem->shm_gen;
243
244 #ifdef PREFAULT_RW
245 for (p = (uint8_t *)sc->sc_busmem;
246 p < (uint8_t *)sc->sc_busmem + BUSMEM_SIZE;
247 p += PAGE_SIZE) {
248 v = *p;
249 *p = v;
250 }
251 #endif
252 shmif_unlockbus(sc->sc_busmem);
253
254 sc->sc_kq = rumpcomp_shmif_watchsetup(-1, memfd, &error);
255 if (sc->sc_kq == -1) {
256 rumpuser_unmap(sc->sc_busmem, BUSMEM_SIZE);
257 return error;
258 }
259
260 sc->sc_memfd = memfd;
261
262 return error;
263 }
264
265 static void
266 finibackend(struct shmif_sc *sc)
267 {
268
269 if (sc->sc_backfile == NULL)
270 return;
271
272 if (sc->sc_backfile) {
273 kmem_free(sc->sc_backfile, sc->sc_backfilelen);
274 sc->sc_backfile = NULL;
275 sc->sc_backfilelen = 0;
276 }
277
278 rumpuser_unmap(sc->sc_busmem, BUSMEM_SIZE);
279 rumpuser_close(sc->sc_memfd, NULL);
280 rumpuser_close(sc->sc_kq, NULL);
281
282 sc->sc_memfd = -1;
283 }
284
285 int
286 rump_shmif_create(const char *path, int *ifnum)
287 {
288 struct shmif_sc *sc;
289 vmem_addr_t t;
290 int unit, error;
291 int memfd = -1; /* XXXgcc */
292
293 if (path) {
294 memfd = rumpuser_open(path,
295 RUMPUSER_OPEN_RDWR | RUMPUSER_OPEN_CREATE, &error);
296 if (memfd == -1)
297 return error;
298 }
299
300 error = vmem_xalloc(shmif_units, 1, 0, 0, 0,
301 VMEM_ADDR_MIN, VMEM_ADDR_MAX, VM_INSTANTFIT | VM_SLEEP, &t);
302
303 if (error != 0) {
304 if (path)
305 rumpuser_close(memfd, NULL);
306 return error;
307 }
308
309 unit = t - 1;
310
311 if ((error = allocif(unit, &sc)) != 0) {
312 if (path)
313 rumpuser_close(memfd, NULL);
314 return error;
315 }
316
317 if (!path)
318 goto out;
319
320 error = initbackend(sc, memfd);
321 if (error) {
322 shmif_unclone(&sc->sc_ec.ec_if);
323 return error;
324 }
325
326 sc->sc_backfilelen = strlen(path)+1;
327 sc->sc_backfile = kmem_alloc(sc->sc_backfilelen, KM_SLEEP);
328 strcpy(sc->sc_backfile, path);
329
330 out:
331 if (ifnum)
332 *ifnum = unit;
333
334 return 0;
335 }
336
337 static int
338 shmif_clone(struct if_clone *ifc, int unit)
339 {
340 int rc;
341 vmem_addr_t unit2;
342
343 /*
344 * Ok, we know the unit number, but we must still reserve it.
345 * Otherwise the wildcard-side of things might get the same one.
346 * This is slightly offset-happy due to vmem. First, we offset
347 * the range of unit numbers by +1 since vmem cannot deal with
348 * ranges starting from 0. Talk about uuuh.
349 */
350 rc = vmem_xalloc(shmif_units, 1, 0, 0, 0, unit+1, unit+1,
351 VM_SLEEP | VM_INSTANTFIT, &unit2);
352 KASSERT(rc == 0 && unit2-1 == unit);
353
354 return allocif(unit, NULL);
355 }
356
/*
 * Destroy an interface: stop it, detach the backend, reap the
 * receiver thread, release the unit number and free everything.
 * The order matters: the thread must observe sc_dying and be
 * joined before the mutex/cv it sleeps on are destroyed.
 */
static int
shmif_unclone(struct ifnet *ifp)
{
	struct shmif_sc *sc = ifp->if_softc;

	shmif_stop(ifp, 1);
	if_down(ifp);
	finibackend(sc);

	/* tell the receiver thread to exit and wake it up */
	mutex_enter(&sc->sc_mtx);
	sc->sc_dying = true;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_mtx);

	/* sc_rcvl is NULL when rump_threads was off or create failed */
	if (sc->sc_rcvl)
		kthread_join(sc->sc_rcvl);
	sc->sc_rcvl = NULL;

	/* return the (+1 offset) unit number, see shmif_clone() */
	vmem_xfree(shmif_units, sc->sc_unit+1, 1);

	ether_ifdetach(ifp);
	if_detach(ifp);

	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_mtx);

	kmem_free(sc, sizeof(*sc));

	return 0;
}
387
388 static int
389 shmif_init(struct ifnet *ifp)
390 {
391 struct shmif_sc *sc = ifp->if_softc;
392 int error = 0;
393
394 if (sc->sc_memfd == -1)
395 return ENXIO;
396 KASSERT(sc->sc_busmem);
397
398 ifp->if_flags |= IFF_RUNNING;
399
400 mutex_enter(&sc->sc_mtx);
401 sc->sc_nextpacket = sc->sc_busmem->shm_last;
402 sc->sc_devgen = sc->sc_busmem->shm_gen;
403
404 cv_broadcast(&sc->sc_cv);
405 mutex_exit(&sc->sc_mtx);
406
407 return error;
408 }
409
/*
 * ioctl handler.  SIOCGLINKSTR reports the backing file path,
 * SIOCSLINKSTR attaches (or with IFLINKSTR_UNSET detaches) a
 * backing file; everything else goes to ether_ioctl().
 */
static int
shmif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct shmif_sc *sc = ifp->if_softc;
	struct ifdrv *ifd;
	char *path;
	int s, rv, memfd;

	s = splnet();
	switch (cmd) {
	case SIOCGLINKSTR:
		ifd = data;

		if (sc->sc_backfilelen == 0) {
			/* no backend attached, nothing to report */
			rv = ENOENT;
			break;
		}

		ifd->ifd_len = sc->sc_backfilelen;
		if (ifd->ifd_cmd == IFLINKSTR_QUERYLEN) {
			/* caller only wants the length, set above */
			rv = 0;
			break;
		}

		if (ifd->ifd_cmd != 0) {
			rv = EINVAL;
			break;
		}

		rv = copyoutstr(sc->sc_backfile, ifd->ifd_data,
		    MIN(sc->sc_backfilelen, ifd->ifd_len), NULL);
		break;
	case SIOCSLINKSTR:
		/* refuse to switch buses under a running interface */
		if (ifp->if_flags & IFF_UP) {
			rv = EBUSY;
			break;
		}

		ifd = data;
		if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
			finibackend(sc);
			rv = 0;
			break;
		} else if (ifd->ifd_cmd != 0) {
			rv = EINVAL;
			break;
		} else if (sc->sc_backfile) {
			/* already have a backend; unset it first */
			rv = EBUSY;
			break;
		}

		if (ifd->ifd_len > MAXPATHLEN) {
			rv = E2BIG;
			break;
		} else if (ifd->ifd_len < 1) {
			rv = EINVAL;
			break;
		}

		path = kmem_alloc(ifd->ifd_len, KM_SLEEP);
		rv = copyinstr(ifd->ifd_data, path, ifd->ifd_len, NULL);
		if (rv) {
			kmem_free(path, ifd->ifd_len);
			break;
		}
		/* rumpuser_open() reports its error through rv */
		memfd = rumpuser_open(path,
		    RUMPUSER_OPEN_RDWR | RUMPUSER_OPEN_CREATE, &rv);
		if (memfd == -1) {
			kmem_free(path, ifd->ifd_len);
			break;
		}
		rv = initbackend(sc, memfd);
		if (rv) {
			/* initbackend() did not take ownership of memfd */
			kmem_free(path, ifd->ifd_len);
			rumpuser_close(memfd, NULL);
			break;
		}
		/* path ownership transfers to the softc */
		sc->sc_backfile = path;
		sc->sc_backfilelen = ifd->ifd_len;

		break;
	default:
		rv = ether_ioctl(ifp, cmd, data);
		if (rv == ENETRESET)
			rv = 0;
		break;
	}
	splx(s);

	return rv;
}
501
/* send everything in-context since it's just a matter of mem-to-mem copy */
static void
shmif_start(struct ifnet *ifp)
{
	struct shmif_sc *sc = ifp->if_softc;
	struct shmif_mem *busmem = sc->sc_busmem;
	struct mbuf *m, *m0;
	uint32_t dataoff;
	uint32_t pktsize, pktwrote;
	bool wrote = false;
	bool wrap;
	int error;

	ifp->if_flags |= IFF_OACTIVE;

	/* drain the send queue, one packet per iteration */
	for (;;) {
		struct shmif_pkthdr sp;
		struct timeval tv;

		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL) {
			break;
		}

		/* total length over the whole mbuf chain */
		pktsize = 0;
		for (m = m0; m != NULL; m = m->m_next) {
			pktsize += m->m_len;
		}
		KASSERT(pktsize <= ETHERMTU + ETHER_HDR_LEN);

		/* per-packet header: length + timestamp */
		getmicrouptime(&tv);
		sp.sp_len = pktsize;
		sp.sp_sec = tv.tv_sec;
		sp.sp_usec = tv.tv_usec;

		bpf_mtap(ifp, m0);

		/* under the bus lock: advance tail, then write hdr+data */
		shmif_lockbus(busmem);
		KASSERT(busmem->shm_magic == SHMIF_MAGIC);
		busmem->shm_last = shmif_nextpktoff(busmem, busmem->shm_last);

		wrap = false;
		dataoff = shmif_buswrite(busmem,
		    busmem->shm_last, &sp, sizeof(sp), &wrap);
		pktwrote = 0;
		for (m = m0; m != NULL; m = m->m_next) {
			pktwrote += m->m_len;
			dataoff = shmif_buswrite(busmem, dataoff,
			    mtod(m, void *), m->m_len, &wrap);
		}
		KASSERT(pktwrote == pktsize);
		if (wrap) {
			/* ring wrapped around: bump the bus generation */
			busmem->shm_gen++;
			DPRINTF(("bus generation now %" PRIu64 "\n",
			    busmem->shm_gen));
		}
		shmif_unlockbus(busmem);

		m_freem(m0);
		wrote = true;

		DPRINTF(("shmif_start: send %d bytes at off %d\n",
		    pktsize, busmem->shm_last));
	}

	ifp->if_flags &= ~IFF_OACTIVE;

	/* wakeup?  poke the bus file so watchers see the new data */
	if (wrote)
		rumpuser_pwrite(sc->sc_memfd,
		    &busversion, sizeof(busversion), IFMEM_WAKEUP, &error);
}
574
575 static void
576 shmif_stop(struct ifnet *ifp, int disable)
577 {
578 struct shmif_sc *sc = ifp->if_softc;
579
580 ifp->if_flags &= ~IFF_RUNNING;
581 membar_producer();
582
583 /*
584 * wakeup thread. this will of course wake up all bus
585 * listeners, but that's life.
586 */
587 if (sc->sc_memfd != -1)
588 rumpuser_pwrite(sc->sc_memfd,
589 &busversion, sizeof(busversion), IFMEM_WAKEUP, NULL);
590 }
591
592
/*
 * Check if we have been sleeping too long.  Basically,
 * our in-sc nextpkt must by first <= nextpkt <= last"+1".
 * We use the fact that first is guaranteed to never overlap
 * with the last frame in the ring.
 */
static __inline bool
stillvalid_p(struct shmif_sc *sc)
{
	struct shmif_mem *busmem = sc->sc_busmem;
	/* unsigned diff: how many generations behind the bus we are */
	unsigned gendiff = busmem->shm_gen - sc->sc_devgen;
	uint32_t lastoff, devoff;

	KASSERT(busmem->shm_first != busmem->shm_last);

	/* normalize onto a 2x busmem chunk */
	devoff = sc->sc_nextpacket;
	lastoff = shmif_nextpktoff(busmem, busmem->shm_last);

	/* trivial case: two or more generations behind means overrun */
	if (gendiff > 1)
		return false;
	KASSERT(gendiff <= 1);

	/* Normalize onto 2x busmem chunk */
	if (busmem->shm_first >= lastoff) {
		/* valid region wraps: shift last (and devoff if on the
		 * current generation) into the second virtual chunk */
		lastoff += BUSMEM_DATASIZE;
		if (gendiff == 0)
			devoff += BUSMEM_DATASIZE;
	} else {
		/* region does not wrap: any generation lag is an overrun */
		if (gendiff)
			return false;
	}

	return devoff >= busmem->shm_first && devoff <= lastoff;
}
629
/*
 * Receiver thread: loop pulling packets off the shared-memory bus
 * and feeding them to the network stack.  Sleeps on sc_cv while the
 * interface is down; exits when sc_dying is set (see shmif_unclone).
 */
static void
shmif_rcv(void *arg)
{
	struct ifnet *ifp = arg;
	struct shmif_sc *sc = ifp->if_softc;
	struct shmif_mem *busmem;
	struct mbuf *m = NULL;
	struct ether_header *eth;
	uint32_t nextpkt;
	bool wrap, passup;
	int error;

 reup:
	/* wait until the interface is brought up (or we are killed) */
	mutex_enter(&sc->sc_mtx);
	while ((ifp->if_flags & IFF_RUNNING) == 0 && !sc->sc_dying)
		cv_wait(&sc->sc_cv, &sc->sc_mtx);
	mutex_exit(&sc->sc_mtx);

	/* re-read: the backend may have changed while we were down */
	busmem = sc->sc_busmem;

	while (ifp->if_flags & IFF_RUNNING) {
		struct shmif_pkthdr sp;

		/* allocate a cluster mbuf, or reuse last loop's unused one */
		if (m == NULL) {
			m = m_gethdr(M_WAIT, MT_DATA);
			MCLGET(m, M_WAIT);
		}

		DPRINTF(("waiting %d/%" PRIu64 "\n",
		    sc->sc_nextpacket, sc->sc_devgen));
		KASSERT(m->m_flags & M_EXT);

		shmif_lockbus(busmem);
		KASSERT(busmem->shm_magic == SHMIF_MAGIC);
		KASSERT(busmem->shm_gen >= sc->sc_devgen);

		/* need more data?  (caught up with the bus tail) */
		if (sc->sc_devgen == busmem->shm_gen &&
		    shmif_nextpktoff(busmem, busmem->shm_last)
		     == sc->sc_nextpacket) {
			shmif_unlockbus(busmem);
			error = 0;
			/* block until someone pokes the bus file */
			rumpcomp_shmif_watchwait(sc->sc_kq, &error);
			if (__predict_false(error))
				printf("shmif_rcv: wait failed %d\n", error);
			membar_consumer();
			continue;
		}

		if (stillvalid_p(sc)) {
			nextpkt = sc->sc_nextpacket;
		} else {
			/* we were overrun: resync to the oldest valid pkt */
			KASSERT(busmem->shm_gen > 0);
			nextpkt = busmem->shm_first;
			if (busmem->shm_first > busmem->shm_last)
				sc->sc_devgen = busmem->shm_gen - 1;
			else
				sc->sc_devgen = busmem->shm_gen;
			DPRINTF(("dev %p overrun, new data: %d/%" PRIu64 "\n",
			    sc, nextpkt, sc->sc_devgen));
		}

		/*
		 * If our read pointer is ahead the bus last write, our
		 * generation must be one behind.
		 */
		KASSERT(!(nextpkt > busmem->shm_last
		    && sc->sc_devgen == busmem->shm_gen));

		/* read the packet header, then the payload, tracking wrap */
		wrap = false;
		nextpkt = shmif_busread(busmem, &sp,
		    nextpkt, sizeof(sp), &wrap);
		KASSERT(sp.sp_len <= ETHERMTU + ETHER_HDR_LEN);
		nextpkt = shmif_busread(busmem, mtod(m, void *),
		    nextpkt, sp.sp_len, &wrap);

		DPRINTF(("shmif_rcv: read packet of length %d at %d\n",
		    sp.sp_len, nextpkt));

		sc->sc_nextpacket = nextpkt;
		shmif_unlockbus(sc->sc_busmem);

		if (wrap) {
			/* crossed the ring boundary: follow the bus gen */
			sc->sc_devgen++;
			DPRINTF(("dev %p generation now %" PRIu64 "\n",
			    sc, sc->sc_devgen));
		}

		m->m_len = m->m_pkthdr.len = sp.sp_len;
		m->m_pkthdr.rcvif = ifp;

		/*
		 * Test if we want to pass the packet upwards
		 */
		eth = mtod(m, struct ether_header *);
		if (memcmp(eth->ether_dhost, CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN) == 0) {
			passup = true;
		} else if (ETHER_IS_MULTICAST(eth->ether_dhost)) {
			passup = true;
		} else if (ifp->if_flags & IFF_PROMISC) {
			m->m_flags |= M_PROMISC;
			passup = true;
		} else {
			passup = false;
		}

		if (passup) {
			/* if_input consumes the mbuf; allocate anew */
			KERNEL_LOCK(1, NULL);
			bpf_mtap(ifp, m);
			ifp->if_input(ifp, m);
			KERNEL_UNLOCK_ONE(NULL);
			m = NULL;
		}
		/* else: reuse mbuf for a future packet */
	}
	m_freem(m);
	m = NULL;

	/* interface went down but is not being destroyed: wait again */
	if (!sc->sc_dying)
		goto reup;

	kthread_exit(0);
}
754