1 1.73 msaitoh /* $NetBSD: if_shmem.c,v 1.73 2017/10/23 09:31:18 msaitoh Exp $ */
2 1.1 pooka
3 1.1 pooka /*
4 1.39 pooka * Copyright (c) 2009, 2010 Antti Kantee. All Rights Reserved.
5 1.1 pooka *
6 1.1 pooka * Development of this software was supported by The Nokia Foundation.
7 1.1 pooka *
8 1.1 pooka * Redistribution and use in source and binary forms, with or without
9 1.1 pooka * modification, are permitted provided that the following conditions
10 1.1 pooka * are met:
11 1.1 pooka * 1. Redistributions of source code must retain the above copyright
12 1.1 pooka * notice, this list of conditions and the following disclaimer.
13 1.1 pooka * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 pooka * notice, this list of conditions and the following disclaimer in the
15 1.1 pooka * documentation and/or other materials provided with the distribution.
16 1.1 pooka *
17 1.1 pooka * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 1.1 pooka * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 1.1 pooka * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 1.1 pooka * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 1.1 pooka * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 1.1 pooka * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 1.1 pooka * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 1.1 pooka * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 1.1 pooka * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 1.1 pooka * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 1.1 pooka * SUCH DAMAGE.
28 1.1 pooka */
29 1.1 pooka
30 1.1 pooka #include <sys/cdefs.h>
31 1.73 msaitoh __KERNEL_RCSID(0, "$NetBSD: if_shmem.c,v 1.73 2017/10/23 09:31:18 msaitoh Exp $");
32 1.1 pooka
33 1.1 pooka #include <sys/param.h>
34 1.13 pooka #include <sys/atomic.h>
35 1.1 pooka #include <sys/fcntl.h>
36 1.1 pooka #include <sys/kmem.h>
37 1.1 pooka #include <sys/kthread.h>
38 1.1 pooka #include <sys/lock.h>
39 1.31 pooka #include <sys/vmem.h>
40 1.44 tls #include <sys/cprng.h>
41 1.1 pooka
42 1.35 pooka #include <net/bpf.h>
43 1.1 pooka #include <net/if.h>
44 1.37 pooka #include <net/if_dl.h>
45 1.1 pooka #include <net/if_ether.h>
46 1.1 pooka
47 1.1 pooka #include <netinet/in.h>
48 1.1 pooka #include <netinet/in_var.h>
49 1.1 pooka
50 1.64 pooka #include <rump-sys/kern.h>
51 1.64 pooka #include <rump-sys/net.h>
52 1.64 pooka
53 1.1 pooka #include <rump/rump.h>
54 1.1 pooka #include <rump/rumpuser.h>
55 1.1 pooka
56 1.59 pooka #include "shmif_user.h"
57 1.1 pooka
58 1.29 pooka static int shmif_clone(struct if_clone *, int);
59 1.29 pooka static int shmif_unclone(struct ifnet *);
60 1.29 pooka
61 1.29 pooka struct if_clone shmif_cloner =
62 1.29 pooka IF_CLONE_INITIALIZER("shmif", shmif_clone, shmif_unclone);
63 1.29 pooka
64 1.1 pooka /*
65 1.28 pooka * Do r/w prefault for backend pages when attaching the interface.
66 1.29 pooka  * At least in theory this improves performance (although no
67 1.29 pooka  * mlocking is done, so the pages might be paged out again).
68 1.28 pooka */
69 1.28 pooka #define PREFAULT_RW
70 1.28 pooka
71 1.28 pooka /*
72 1.1 pooka * A virtual ethernet interface which uses shared memory from a
73 1.1 pooka * memory mapped file as the bus.
74 1.1 pooka */
75 1.1 pooka
76 1.1 pooka static int shmif_init(struct ifnet *);
77 1.1 pooka static int shmif_ioctl(struct ifnet *, u_long, void *);
78 1.1 pooka static void shmif_start(struct ifnet *);
79 1.1 pooka static void shmif_stop(struct ifnet *, int);
80 1.1 pooka
81 1.16 pooka #include "shmifvar.h"
82 1.16 pooka
83 1.1 pooka struct shmif_sc {
84 1.1 pooka struct ethercom sc_ec;
85 1.16 pooka struct shmif_mem *sc_busmem;
86 1.1 pooka int sc_memfd;
87 1.1 pooka int sc_kq;
88 1.32 pooka int sc_unit;
89 1.1 pooka
90 1.29 pooka char *sc_backfile;
91 1.29 pooka size_t sc_backfilelen;
92 1.29 pooka
93 1.26 pooka uint64_t sc_devgen;
94 1.1 pooka uint32_t sc_nextpacket;
95 1.32 pooka
96 1.32 pooka kmutex_t sc_mtx;
97 1.32 pooka kcondvar_t sc_cv;
98 1.32 pooka
99 1.32 pooka struct lwp *sc_rcvl;
100 1.32 pooka bool sc_dying;
101 1.63 ozaki
102 1.63 ozaki uint64_t sc_uuid;
103 1.1 pooka };
104 1.1 pooka
105 1.1 pooka static void shmif_rcv(void *);
106 1.1 pooka
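/*
 * States for the simple spinlock protecting the shared bus memory.
 * A spinner that fails LOCK_COOLDOWN consecutive attempts sleeps for
 * 1ms before trying again (see shmif_lockbus()).
 */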
107 1.23 pooka #define LOCK_UNLOCKED 0
108 1.23 pooka #define LOCK_LOCKED 1
109 1.23 pooka #define LOCK_COOLDOWN 1001
110 1.23 pooka
111 1.31 pooka vmem_t *shmif_units;
112 1.31 pooka
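/*
 * Poke the other users of the bus: write the shmif version number to
 * the wakeup offset of the backing file, which peers observe through
 * their file watchers.
 */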
113 1.52 pooka static void
114 1.52 pooka dowakeup(struct shmif_sc *sc)
115 1.52 pooka {
116 1.52 pooka struct rumpuser_iovec iov;
117 1.52 pooka uint32_t ver = SHMIF_VERSION;
118 1.53 pooka size_t n;
119 1.52 pooka
120 1.52 pooka iov.iov_base = &ver;
121 1.52 pooka iov.iov_len = sizeof(ver);
122 1.53 pooka rumpuser_iovwrite(sc->sc_memfd, &iov, 1, IFMEM_WAKEUP, &n);
123 1.52 pooka }
124 1.52 pooka
125 1.23 pooka /*
126 1.23 pooka * This locking needs work and will misbehave severely if:
127 1.23 pooka * 1) the backing memory has to be paged in
128 1.23 pooka * 2) some lockholder exits while holding the lock
129 1.23 pooka */
130 1.23 pooka static void
131 1.23 pooka shmif_lockbus(struct shmif_mem *busmem)
132 1.23 pooka {
133 1.23 pooka int i = 0;
134 1.23 pooka
135 1.23 pooka while (__predict_false(atomic_cas_32(&busmem->shm_lock,
136 1.23 pooka LOCK_UNLOCKED, LOCK_LOCKED) == LOCK_LOCKED)) {
137 1.23 pooka if (__predict_false(++i > LOCK_COOLDOWN)) {
138 1.50 pooka /* wait 1ms */
139 1.54 pooka rumpuser_clock_sleep(RUMPUSER_CLOCK_RELWALL,
140 1.54 pooka 0, 1000*1000);
141 1.23 pooka i = 0;
142 1.23 pooka }
143 1.23 pooka continue;
144 1.23 pooka }
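	/* lock acquired; pairs with the membar_exit() in shmif_unlockbus() */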
145 1.23 pooka membar_enter();
146 1.23 pooka }
147 1.23 pooka
148 1.23 pooka static void
149 1.23 pooka shmif_unlockbus(struct shmif_mem *busmem)
150 1.23 pooka {
151 1.61 justin unsigned int old __diagused;
152 1.23 pooka
153 1.23 pooka membar_exit();
154 1.23 pooka old = atomic_swap_32(&busmem->shm_lock, LOCK_UNLOCKED);
155 1.23 pooka KASSERT(old == LOCK_LOCKED);
156 1.23 pooka }
157 1.23 pooka
158 1.29 pooka static int
159 1.29 pooka allocif(int unit, struct shmif_sc **scp)
160 1.1 pooka {
161 1.29 pooka uint8_t enaddr[ETHER_ADDR_LEN] = { 0xb2, 0xa0, 0x00, 0x00, 0x00, 0x00 };
162 1.1 pooka struct shmif_sc *sc;
163 1.1 pooka struct ifnet *ifp;
164 1.1 pooka uint32_t randnum;
165 1.32 pooka int error;
166 1.1 pooka
167 1.44 tls randnum = cprng_fast32();
168 1.15 pooka memcpy(&enaddr[2], &randnum, sizeof(randnum));
169 1.1 pooka
170 1.1 pooka sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
171 1.29 pooka sc->sc_memfd = -1;
172 1.32 pooka sc->sc_unit = unit;
173 1.63 ozaki sc->sc_uuid = cprng_fast64();
174 1.29 pooka
175 1.1 pooka ifp = &sc->sc_ec.ec_if;
176 1.1 pooka
177 1.60 christos snprintf(ifp->if_xname, sizeof(ifp->if_xname), "shmif%d", unit);
178 1.29 pooka ifp->if_softc = sc;
179 1.63 ozaki ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
180 1.29 pooka ifp->if_init = shmif_init;
181 1.29 pooka ifp->if_ioctl = shmif_ioctl;
182 1.29 pooka ifp->if_start = shmif_start;
183 1.29 pooka ifp->if_stop = shmif_stop;
184 1.29 pooka ifp->if_mtu = ETHERMTU;
185 1.37 pooka ifp->if_dlt = DLT_EN10MB;
186 1.29 pooka
187 1.32 pooka mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
188 1.32 pooka cv_init(&sc->sc_cv, "shmifcv");
189 1.32 pooka
190 1.73 msaitoh error = if_initialize(ifp);
191 1.73 msaitoh if (error != 0) {
192 1.73 msaitoh 	aprint_error("shmif%d: if_initialize failed(%d)\n", unit, error);
193 1.73 msaitoh cv_destroy(&sc->sc_cv);
194 1.73 msaitoh mutex_destroy(&sc->sc_mtx);
195 1.73 msaitoh kmem_free(sc, sizeof(*sc));
196 1.73 msaitoh
197 1.73 msaitoh return error;
198 1.73 msaitoh }
199 1.29 pooka ether_ifattach(ifp, enaddr);
200 1.65 ozaki if_register(ifp);
201 1.29 pooka
202 1.29 pooka aprint_verbose("shmif%d: Ethernet address %s\n",
203 1.32 pooka unit, ether_sprintf(enaddr));
204 1.29 pooka
205 1.29 pooka if (scp)
206 1.29 pooka *scp = sc;
207 1.29 pooka
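	/*
	 * All input is handled by the receive thread; without rump
	 * threads the interface attaches but can never receive.
	 */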
208 1.32 pooka error = 0;
209 1.32 pooka if (rump_threads) {
210 1.32 pooka error = kthread_create(PRI_NONE,
211 1.40 rmind KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
212 1.32 pooka shmif_rcv, ifp, &sc->sc_rcvl, "shmif");
213 1.32 pooka } else {
214 1.32 pooka printf("WARNING: threads not enabled, shmif NOT working\n");
215 1.32 pooka }
216 1.32 pooka
217 1.32 pooka if (error) {
218 1.32 pooka shmif_unclone(ifp);
219 1.32 pooka }
220 1.32 pooka
221 1.32 pooka return error;
222 1.29 pooka }
223 1.29 pooka
224 1.29 pooka static int
225 1.29 pooka initbackend(struct shmif_sc *sc, int memfd)
226 1.29 pooka {
227 1.29 pooka volatile uint8_t v;
228 1.29 pooka volatile uint8_t *p;
229 1.53 pooka void *mem;
230 1.29 pooka int error;
231 1.29 pooka
232 1.53 pooka error = rumpcomp_shmif_mmap(memfd, BUSMEM_SIZE, &mem);
233 1.1 pooka if (error)
234 1.29 pooka return error;
235 1.53 pooka sc->sc_busmem = mem;
236 1.17 pooka
237 1.29 pooka if (sc->sc_busmem->shm_magic
238 1.29 pooka && sc->sc_busmem->shm_magic != SHMIF_MAGIC) {
239 1.29 pooka 	printf("bus is not magical\n");
240 1.29 pooka rumpuser_unmap(sc->sc_busmem, BUSMEM_SIZE);
241 1.69 msaitoh return ENOEXEC;
242 1.29 pooka }
243 1.28 pooka
244 1.36 pooka /*
245 1.36 pooka * Prefault in pages to minimize runtime penalty with buslock.
246 1.36 pooka * Use 512 instead of PAGE_SIZE to make sure we catch cases where
247 1.36 pooka * rump kernel PAGE_SIZE > host page size.
248 1.36 pooka */
249 1.28 pooka for (p = (uint8_t *)sc->sc_busmem;
250 1.28 pooka p < (uint8_t *)sc->sc_busmem + BUSMEM_SIZE;
251 1.36 pooka p += 512)
252 1.28 pooka v = *p;
253 1.28 pooka
254 1.19 pooka shmif_lockbus(sc->sc_busmem);
255 1.19 pooka /* we're first? initialize bus */
256 1.19 pooka if (sc->sc_busmem->shm_magic == 0) {
257 1.19 pooka sc->sc_busmem->shm_magic = SHMIF_MAGIC;
258 1.19 pooka sc->sc_busmem->shm_first = BUSMEM_DATASIZE;
259 1.19 pooka }
260 1.19 pooka
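	/* start receiving from the current end of the bus, at the current generation */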
261 1.16 pooka sc->sc_nextpacket = sc->sc_busmem->shm_last;
262 1.26 pooka sc->sc_devgen = sc->sc_busmem->shm_gen;
263 1.28 pooka
264 1.28 pooka #ifdef PREFAULT_RW
265 1.28 pooka for (p = (uint8_t *)sc->sc_busmem;
266 1.28 pooka p < (uint8_t *)sc->sc_busmem + BUSMEM_SIZE;
267 1.28 pooka p += PAGE_SIZE) {
268 1.28 pooka v = *p;
269 1.28 pooka *p = v;
270 1.28 pooka }
271 1.28 pooka #endif
272 1.19 pooka shmif_unlockbus(sc->sc_busmem);
273 1.1 pooka
274 1.53 pooka sc->sc_kq = -1;
275 1.53 pooka error = rumpcomp_shmif_watchsetup(&sc->sc_kq, memfd);
276 1.53 pooka if (error) {
277 1.32 pooka rumpuser_unmap(sc->sc_busmem, BUSMEM_SIZE);
278 1.29 pooka return error;
279 1.32 pooka }
280 1.1 pooka
281 1.29 pooka sc->sc_memfd = memfd;
282 1.32 pooka
283 1.32 pooka return error;
284 1.29 pooka }
285 1.29 pooka
286 1.29 pooka static void
287 1.29 pooka finibackend(struct shmif_sc *sc)
288 1.29 pooka {
289 1.29 pooka
290 1.32 pooka if (sc->sc_backfile == NULL)
291 1.32 pooka return;
292 1.32 pooka
294 1.32 pooka 	kmem_free(sc->sc_backfile, sc->sc_backfilelen);
295 1.32 pooka 	sc->sc_backfile = NULL;
296 1.32 pooka 	sc->sc_backfilelen = 0;
298 1.29 pooka
299 1.29 pooka rumpuser_unmap(sc->sc_busmem, BUSMEM_SIZE);
300 1.53 pooka rumpuser_close(sc->sc_memfd);
301 1.53 pooka rumpuser_close(sc->sc_kq);
302 1.32 pooka
303 1.32 pooka sc->sc_memfd = -1;
304 1.29 pooka }
305 1.29 pooka
306 1.29 pooka int
307 1.29 pooka rump_shmif_create(const char *path, int *ifnum)
308 1.29 pooka {
309 1.29 pooka struct shmif_sc *sc;
310 1.43 dyoung vmem_addr_t t;
311 1.33 pooka int unit, error;
312 1.33 pooka int memfd = -1; /* XXXgcc */
313 1.29 pooka
314 1.33 pooka if (path) {
315 1.53 pooka error = rumpuser_open(path,
316 1.53 pooka RUMPUSER_OPEN_RDWR | RUMPUSER_OPEN_CREATE, &memfd);
317 1.53 pooka if (error)
318 1.33 pooka return error;
319 1.33 pooka }
320 1.1 pooka
321 1.43 dyoung error = vmem_xalloc(shmif_units, 1, 0, 0, 0,
322 1.43 dyoung VMEM_ADDR_MIN, VMEM_ADDR_MAX, VM_INSTANTFIT | VM_SLEEP, &t);
323 1.43 dyoung
324 1.43 dyoung if (error != 0) {
325 1.43 dyoung if (path)
326 1.53 pooka rumpuser_close(memfd);
327 1.43 dyoung return error;
328 1.43 dyoung }
329 1.43 dyoung
330 1.43 dyoung unit = t - 1;
331 1.31 pooka
332 1.32 pooka if ((error = allocif(unit, &sc)) != 0) {
333 1.33 pooka if (path)
334 1.53 pooka rumpuser_close(memfd);
335 1.29 pooka return error;
336 1.29 pooka }
337 1.33 pooka
338 1.33 pooka if (!path)
339 1.33 pooka goto out;
340 1.33 pooka
341 1.29 pooka error = initbackend(sc, memfd);
342 1.29 pooka if (error) {
343 1.32 pooka shmif_unclone(&sc->sc_ec.ec_if);
344 1.29 pooka return error;
345 1.29 pooka }
346 1.1 pooka
347 1.29 pooka sc->sc_backfilelen = strlen(path)+1;
348 1.29 pooka sc->sc_backfile = kmem_alloc(sc->sc_backfilelen, KM_SLEEP);
349 1.29 pooka strcpy(sc->sc_backfile, path);
350 1.12 pooka
351 1.33 pooka out:
352 1.2 pooka if (ifnum)
353 1.32 pooka *ifnum = unit;
354 1.29 pooka
355 1.1 pooka return 0;
356 1.29 pooka }
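/*
 * Illustrative use only, not part of this file: a caller with access
 * to this symbol (rump clients normally go through a rump_pub_*
 * wrapper) could create an interface backed by a bus file.  The path
 * "busmem" below is an assumption of the example.
 *
 *	int ifnum;
 *
 *	if (rump_shmif_create("busmem", &ifnum) == 0)
 *		printf("created shmif%d\n", ifnum);
 */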
357 1.1 pooka
358 1.29 pooka static int
359 1.29 pooka shmif_clone(struct if_clone *ifc, int unit)
360 1.29 pooka {
361 1.61 justin int rc __diagused;
362 1.43 dyoung vmem_addr_t unit2;
363 1.29 pooka
364 1.31 pooka /*
365 1.31 pooka * Ok, we know the unit number, but we must still reserve it.
366 1.31 pooka * Otherwise the wildcard-side of things might get the same one.
367 1.31 pooka 	 * This is slightly offset-happy due to vmem: we offset the range
368 1.31 pooka 	 * of unit numbers by +1, since vmem cannot deal with ranges
369 1.41 dyoung 	 * starting from 0.
370 1.31 pooka */
371 1.43 dyoung rc = vmem_xalloc(shmif_units, 1, 0, 0, 0, unit+1, unit+1,
372 1.43 dyoung VM_SLEEP | VM_INSTANTFIT, &unit2);
373 1.43 dyoung KASSERT(rc == 0 && unit2-1 == unit);
374 1.29 pooka
375 1.29 pooka return allocif(unit, NULL);
376 1.29 pooka }
377 1.29 pooka
378 1.29 pooka static int
379 1.29 pooka shmif_unclone(struct ifnet *ifp)
380 1.29 pooka {
381 1.32 pooka struct shmif_sc *sc = ifp->if_softc;
382 1.32 pooka
383 1.32 pooka shmif_stop(ifp, 1);
384 1.32 pooka if_down(ifp);
385 1.32 pooka
386 1.32 pooka mutex_enter(&sc->sc_mtx);
387 1.32 pooka sc->sc_dying = true;
388 1.32 pooka cv_broadcast(&sc->sc_cv);
389 1.32 pooka mutex_exit(&sc->sc_mtx);
390 1.29 pooka
391 1.32 pooka if (sc->sc_rcvl)
392 1.32 pooka kthread_join(sc->sc_rcvl);
393 1.32 pooka sc->sc_rcvl = NULL;
394 1.32 pooka
395 1.72 ozaki /*
396 1.72 ozaki 	 * This needs to be called after the kthread has exited; otherwise
397 1.72 ozaki 	 * closing the kqueue (sc_kq) sometimes hangs, probably because of a
398 1.72 ozaki 	 * race between close and the kthread's kevent on the kqueue.
399 1.72 ozaki */
400 1.72 ozaki finibackend(sc);
401 1.72 ozaki
402 1.32 pooka vmem_xfree(shmif_units, sc->sc_unit+1, 1);
403 1.32 pooka
404 1.32 pooka ether_ifdetach(ifp);
405 1.32 pooka if_detach(ifp);
406 1.32 pooka
407 1.32 pooka cv_destroy(&sc->sc_cv);
408 1.32 pooka mutex_destroy(&sc->sc_mtx);
409 1.32 pooka
410 1.32 pooka kmem_free(sc, sizeof(*sc));
411 1.32 pooka
412 1.32 pooka return 0;
413 1.1 pooka }
414 1.1 pooka
415 1.1 pooka static int
416 1.1 pooka shmif_init(struct ifnet *ifp)
417 1.1 pooka {
418 1.29 pooka struct shmif_sc *sc = ifp->if_softc;
419 1.4 pooka int error = 0;
420 1.4 pooka
421 1.29 pooka if (sc->sc_memfd == -1)
422 1.29 pooka return ENXIO;
423 1.32 pooka KASSERT(sc->sc_busmem);
424 1.29 pooka
425 1.32 pooka ifp->if_flags |= IFF_RUNNING;
426 1.32 pooka
427 1.32 pooka mutex_enter(&sc->sc_mtx);
428 1.32 pooka sc->sc_nextpacket = sc->sc_busmem->shm_last;
429 1.32 pooka sc->sc_devgen = sc->sc_busmem->shm_gen;
430 1.32 pooka
431 1.32 pooka cv_broadcast(&sc->sc_cv);
432 1.32 pooka mutex_exit(&sc->sc_mtx);
433 1.1 pooka
434 1.4 pooka return error;
435 1.1 pooka }
436 1.1 pooka
437 1.1 pooka static int
438 1.1 pooka shmif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
439 1.1 pooka {
440 1.29 pooka struct shmif_sc *sc = ifp->if_softc;
441 1.29 pooka struct ifdrv *ifd;
442 1.29 pooka char *path;
443 1.32 pooka int s, rv, memfd;
444 1.1 pooka
445 1.1 pooka s = splnet();
446 1.29 pooka switch (cmd) {
447 1.29 pooka case SIOCGLINKSTR:
448 1.29 pooka ifd = data;
449 1.29 pooka
450 1.29 pooka if (sc->sc_backfilelen == 0) {
451 1.29 pooka rv = ENOENT;
452 1.29 pooka break;
453 1.29 pooka }
454 1.29 pooka
455 1.29 pooka ifd->ifd_len = sc->sc_backfilelen;
456 1.29 pooka if (ifd->ifd_cmd == IFLINKSTR_QUERYLEN) {
457 1.29 pooka rv = 0;
458 1.29 pooka break;
459 1.29 pooka }
460 1.29 pooka
461 1.29 pooka if (ifd->ifd_cmd != 0) {
462 1.29 pooka rv = EINVAL;
463 1.29 pooka break;
464 1.29 pooka }
465 1.29 pooka
466 1.29 pooka rv = copyoutstr(sc->sc_backfile, ifd->ifd_data,
467 1.29 pooka MIN(sc->sc_backfilelen, ifd->ifd_len), NULL);
468 1.29 pooka break;
469 1.29 pooka case SIOCSLINKSTR:
470 1.29 pooka if (ifp->if_flags & IFF_UP) {
471 1.29 pooka rv = EBUSY;
472 1.29 pooka break;
473 1.29 pooka }
474 1.29 pooka
475 1.29 pooka ifd = data;
476 1.29 pooka if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
477 1.29 pooka finibackend(sc);
478 1.29 pooka rv = 0;
479 1.29 pooka break;
480 1.29 pooka } else if (ifd->ifd_cmd != 0) {
481 1.29 pooka rv = EINVAL;
482 1.29 pooka break;
483 1.29 pooka } else if (sc->sc_backfile) {
484 1.29 pooka rv = EBUSY;
485 1.29 pooka break;
486 1.29 pooka }
487 1.29 pooka
488 1.29 pooka if (ifd->ifd_len > MAXPATHLEN) {
489 1.29 pooka rv = E2BIG;
490 1.29 pooka break;
491 1.29 pooka } else if (ifd->ifd_len < 1) {
492 1.29 pooka rv = EINVAL;
493 1.29 pooka break;
494 1.29 pooka }
495 1.29 pooka
496 1.29 pooka path = kmem_alloc(ifd->ifd_len, KM_SLEEP);
497 1.29 pooka rv = copyinstr(ifd->ifd_data, path, ifd->ifd_len, NULL);
498 1.29 pooka if (rv) {
499 1.29 pooka kmem_free(path, ifd->ifd_len);
500 1.29 pooka break;
501 1.29 pooka }
502 1.53 pooka rv = rumpuser_open(path,
503 1.53 pooka RUMPUSER_OPEN_RDWR | RUMPUSER_OPEN_CREATE, &memfd);
504 1.53 pooka if (rv) {
505 1.29 pooka kmem_free(path, ifd->ifd_len);
506 1.29 pooka break;
507 1.29 pooka }
508 1.29 pooka rv = initbackend(sc, memfd);
509 1.29 pooka if (rv) {
510 1.29 pooka kmem_free(path, ifd->ifd_len);
511 1.53 pooka rumpuser_close(memfd);
512 1.29 pooka break;
513 1.29 pooka }
514 1.29 pooka sc->sc_backfile = path;
515 1.29 pooka sc->sc_backfilelen = ifd->ifd_len;
516 1.29 pooka
517 1.29 pooka break;
518 1.29 pooka default:
519 1.29 pooka rv = ether_ioctl(ifp, cmd, data);
520 1.29 pooka if (rv == ENETRESET)
521 1.29 pooka rv = 0;
522 1.29 pooka break;
523 1.29 pooka }
524 1.1 pooka splx(s);
525 1.1 pooka
526 1.1 pooka return rv;
527 1.1 pooka }
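/*
 * Illustrative use only, not from this file: a minimal sketch of
 * attaching an existing shmif instance to a bus file via SIOCSLINKSTR,
 * matching the handler above.  The socket "s", the interface name
 * "shmif0" and the path "busmem" are assumptions of the example.
 *
 *	struct ifdrv ifd;
 *
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "shmif0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = 0;
 *	ifd.ifd_data = __UNCONST("busmem");
 *	ifd.ifd_len = strlen("busmem") + 1;
 *	ioctl(s, SIOCSLINKSTR, &ifd);
 */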
528 1.1 pooka
529 1.32 pooka /* send everything in-context since it's just a matter of mem-to-mem copy */
530 1.1 pooka static void
531 1.1 pooka shmif_start(struct ifnet *ifp)
532 1.1 pooka {
533 1.1 pooka struct shmif_sc *sc = ifp->if_softc;
534 1.26 pooka struct shmif_mem *busmem = sc->sc_busmem;
535 1.1 pooka struct mbuf *m, *m0;
536 1.26 pooka uint32_t dataoff;
537 1.26 pooka uint32_t pktsize, pktwrote;
538 1.1 pooka bool wrote = false;
539 1.24 pooka bool wrap;
540 1.1 pooka
541 1.26 pooka ifp->if_flags |= IFF_OACTIVE;
542 1.26 pooka
543 1.1 pooka for (;;) {
544 1.20 pooka struct shmif_pkthdr sp;
545 1.20 pooka struct timeval tv;
546 1.20 pooka
547 1.1 pooka IF_DEQUEUE(&ifp->if_snd, m0);
548 1.1 pooka if (m0 == NULL) {
549 1.1 pooka break;
550 1.1 pooka }
551 1.1 pooka
552 1.25 pooka pktsize = 0;
553 1.19 pooka for (m = m0; m != NULL; m = m->m_next) {
554 1.19 pooka pktsize += m->m_len;
555 1.19 pooka }
556 1.26 pooka KASSERT(pktsize <= ETHERMTU + ETHER_HDR_LEN);
557 1.19 pooka
558 1.20 pooka getmicrouptime(&tv);
559 1.20 pooka sp.sp_len = pktsize;
560 1.20 pooka sp.sp_sec = tv.tv_sec;
561 1.20 pooka sp.sp_usec = tv.tv_usec;
562 1.63 ozaki sp.sp_sender = sc->sc_uuid;
563 1.20 pooka
564 1.35 pooka bpf_mtap(ifp, m0);
565 1.35 pooka
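		/*
		 * A packet on the bus is a struct shmif_pkthdr followed
		 * directly by the frame data; if the write wraps around
		 * the data area, the bus generation is bumped below.
		 */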
566 1.26 pooka shmif_lockbus(busmem);
567 1.26 pooka KASSERT(busmem->shm_magic == SHMIF_MAGIC);
568 1.26 pooka busmem->shm_last = shmif_nextpktoff(busmem, busmem->shm_last);
569 1.21 pooka
570 1.24 pooka wrap = false;
571 1.26 pooka dataoff = shmif_buswrite(busmem,
572 1.26 pooka busmem->shm_last, &sp, sizeof(sp), &wrap);
573 1.26 pooka pktwrote = 0;
574 1.1 pooka for (m = m0; m != NULL; m = m->m_next) {
575 1.26 pooka pktwrote += m->m_len;
576 1.26 pooka dataoff = shmif_buswrite(busmem, dataoff,
577 1.19 pooka mtod(m, void *), m->m_len, &wrap);
578 1.1 pooka }
579 1.26 pooka KASSERT(pktwrote == pktsize);
580 1.27 pooka if (wrap) {
581 1.26 pooka busmem->shm_gen++;
582 1.47 pooka DPRINTF(("bus generation now %" PRIu64 "\n",
583 1.47 pooka busmem->shm_gen));
584 1.27 pooka }
585 1.26 pooka shmif_unlockbus(busmem);
586 1.1 pooka
587 1.1 pooka m_freem(m0);
588 1.1 pooka wrote = true;
589 1.62 ozaki ifp->if_opackets++;
590 1.1 pooka
591 1.1 pooka DPRINTF(("shmif_start: send %d bytes at off %d\n",
592 1.27 pooka pktsize, busmem->shm_last));
593 1.1 pooka }
594 1.26 pooka
595 1.26 pooka ifp->if_flags &= ~IFF_OACTIVE;
596 1.26 pooka
597 1.32 pooka /* wakeup? */
598 1.52 pooka if (wrote) {
599 1.52 pooka dowakeup(sc);
600 1.52 pooka }
601 1.1 pooka }
602 1.1 pooka
603 1.1 pooka static void
604 1.1 pooka shmif_stop(struct ifnet *ifp, int disable)
605 1.1 pooka {
606 1.32 pooka struct shmif_sc *sc = ifp->if_softc;
607 1.1 pooka
608 1.32 pooka ifp->if_flags &= ~IFF_RUNNING;
609 1.32 pooka membar_producer();
610 1.32 pooka
611 1.32 pooka /*
612 1.32 pooka 	 * Wake up the receiver thread.  This will of course wake up all
613 1.32 pooka 	 * bus listeners, but that's life.
614 1.32 pooka */
615 1.52 pooka if (sc->sc_memfd != -1) {
616 1.52 pooka dowakeup(sc);
617 1.52 pooka }
618 1.1 pooka }
619 1.1 pooka
620 1.27 pooka
621 1.27 pooka /*
622 1.27 pooka * Check if we have been sleeping too long. Basically,
623 1.27 pooka  * our in-sc nextpkt must satisfy first <= nextpkt <= last"+1".
624 1.27 pooka * We use the fact that first is guaranteed to never overlap
625 1.27 pooka * with the last frame in the ring.
626 1.27 pooka */
627 1.27 pooka static __inline bool
628 1.27 pooka stillvalid_p(struct shmif_sc *sc)
629 1.27 pooka {
630 1.27 pooka struct shmif_mem *busmem = sc->sc_busmem;
631 1.27 pooka unsigned gendiff = busmem->shm_gen - sc->sc_devgen;
632 1.27 pooka uint32_t lastoff, devoff;
633 1.27 pooka
634 1.27 pooka KASSERT(busmem->shm_first != busmem->shm_last);
635 1.27 pooka
636 1.27 pooka 	/* take local copies of the current offsets */
637 1.27 pooka devoff = sc->sc_nextpacket;
638 1.27 pooka lastoff = shmif_nextpktoff(busmem, busmem->shm_last);
639 1.27 pooka
640 1.27 pooka /* trivial case */
641 1.27 pooka if (gendiff > 1)
642 1.27 pooka return false;
643 1.27 pooka KASSERT(gendiff <= 1);
644 1.27 pooka
645 1.27 pooka /* Normalize onto 2x busmem chunk */
646 1.27 pooka if (busmem->shm_first >= lastoff) {
647 1.27 pooka lastoff += BUSMEM_DATASIZE;
648 1.27 pooka if (gendiff == 0)
649 1.27 pooka devoff += BUSMEM_DATASIZE;
650 1.27 pooka } else {
651 1.27 pooka if (gendiff)
652 1.27 pooka return false;
653 1.27 pooka }
654 1.27 pooka
655 1.27 pooka return devoff >= busmem->shm_first && devoff <= lastoff;
656 1.27 pooka }
657 1.27 pooka
658 1.1 pooka static void
659 1.1 pooka shmif_rcv(void *arg)
660 1.1 pooka {
661 1.1 pooka struct ifnet *ifp = arg;
662 1.1 pooka struct shmif_sc *sc = ifp->if_softc;
663 1.32 pooka struct shmif_mem *busmem;
664 1.1 pooka struct mbuf *m = NULL;
665 1.1 pooka struct ether_header *eth;
666 1.27 pooka uint32_t nextpkt;
667 1.37 pooka bool wrap, passup;
668 1.1 pooka int error;
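	/*
	 * Offset applied to fresh mbufs so that the payload following the
	 * Ethernet header lands on an aligned boundary.
	 */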
669 1.57 pooka const int align
670 1.57 pooka = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
671 1.1 pooka
672 1.32 pooka reup:
673 1.32 pooka mutex_enter(&sc->sc_mtx);
674 1.32 pooka while ((ifp->if_flags & IFF_RUNNING) == 0 && !sc->sc_dying)
675 1.32 pooka cv_wait(&sc->sc_cv, &sc->sc_mtx);
676 1.32 pooka mutex_exit(&sc->sc_mtx);
677 1.32 pooka
678 1.32 pooka busmem = sc->sc_busmem;
679 1.32 pooka
680 1.32 pooka while (ifp->if_flags & IFF_RUNNING) {
681 1.20 pooka struct shmif_pkthdr sp;
682 1.20 pooka
683 1.1 pooka if (m == NULL) {
684 1.1 pooka m = m_gethdr(M_WAIT, MT_DATA);
685 1.1 pooka MCLGET(m, M_WAIT);
686 1.57 pooka m->m_data += align;
687 1.1 pooka }
688 1.1 pooka
689 1.47 pooka DPRINTF(("waiting %d/%" PRIu64 "\n",
690 1.47 pooka sc->sc_nextpacket, sc->sc_devgen));
691 1.26 pooka KASSERT(m->m_flags & M_EXT);
692 1.1 pooka
693 1.26 pooka shmif_lockbus(busmem);
694 1.26 pooka KASSERT(busmem->shm_magic == SHMIF_MAGIC);
695 1.27 pooka KASSERT(busmem->shm_gen >= sc->sc_devgen);
696 1.1 pooka
697 1.1 pooka /* need more data? */
698 1.27 pooka if (sc->sc_devgen == busmem->shm_gen &&
699 1.26 pooka shmif_nextpktoff(busmem, busmem->shm_last)
700 1.26 pooka == sc->sc_nextpacket) {
701 1.26 pooka shmif_unlockbus(busmem);
702 1.71 ozaki error = rumpcomp_shmif_watchwait(sc->sc_kq);
703 1.1 pooka if (__predict_false(error))
704 1.1 pooka printf("shmif_rcv: wait failed %d\n", error);
705 1.32 pooka membar_consumer();
706 1.1 pooka continue;
707 1.1 pooka }
708 1.1 pooka
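		/*
		 * Figure out where to read next: if the writers have
		 * overrun our read pointer, restart from the oldest
		 * packet still on the bus and resynchronize our
		 * generation counter.
		 */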
709 1.27 pooka if (stillvalid_p(sc)) {
710 1.27 pooka nextpkt = sc->sc_nextpacket;
711 1.27 pooka } else {
712 1.27 pooka KASSERT(busmem->shm_gen > 0);
713 1.26 pooka nextpkt = busmem->shm_first;
714 1.26 pooka if (busmem->shm_first > busmem->shm_last)
715 1.27 pooka sc->sc_devgen = busmem->shm_gen - 1;
716 1.26 pooka else
717 1.27 pooka sc->sc_devgen = busmem->shm_gen;
718 1.47 pooka DPRINTF(("dev %p overrun, new data: %d/%" PRIu64 "\n",
719 1.27 pooka sc, nextpkt, sc->sc_devgen));
720 1.26 pooka }
721 1.26 pooka
722 1.26 pooka /*
723 1.26 pooka * If our read pointer is ahead the bus last write, our
724 1.26 pooka * generation must be one behind.
725 1.26 pooka */
726 1.26 pooka KASSERT(!(nextpkt > busmem->shm_last
727 1.27 pooka && sc->sc_devgen == busmem->shm_gen));
728 1.26 pooka
729 1.24 pooka wrap = false;
730 1.26 pooka nextpkt = shmif_busread(busmem, &sp,
731 1.26 pooka nextpkt, sizeof(sp), &wrap);
732 1.26 pooka KASSERT(sp.sp_len <= ETHERMTU + ETHER_HDR_LEN);
733 1.26 pooka nextpkt = shmif_busread(busmem, mtod(m, void *),
734 1.26 pooka nextpkt, sp.sp_len, &wrap);
735 1.1 pooka
736 1.1 pooka DPRINTF(("shmif_rcv: read packet of length %d at %d\n",
737 1.20 pooka sp.sp_len, nextpkt));
738 1.1 pooka
739 1.26 pooka sc->sc_nextpacket = nextpkt;
740 1.19 pooka shmif_unlockbus(sc->sc_busmem);
741 1.1 pooka
742 1.27 pooka if (wrap) {
743 1.26 pooka sc->sc_devgen++;
744 1.47 pooka DPRINTF(("dev %p generation now %" PRIu64 "\n",
745 1.27 pooka sc, sc->sc_devgen));
746 1.27 pooka }
747 1.26 pooka
748 1.56 pooka /*
749 1.56 pooka * Ignore packets too short to possibly be valid.
750 1.56 pooka * This is hit at least for the first frame on a new bus.
751 1.56 pooka */
752 1.55 pooka if (__predict_false(sp.sp_len < ETHER_HDR_LEN)) {
753 1.55 pooka DPRINTF(("shmif read packet len %d < ETHER_HDR_LEN\n",
754 1.55 pooka sp.sp_len));
755 1.55 pooka continue;
756 1.55 pooka }
757 1.55 pooka
758 1.20 pooka m->m_len = m->m_pkthdr.len = sp.sp_len;
759 1.67 ozaki m_set_rcvif(m, ifp);
760 1.1 pooka
761 1.37 pooka /*
762 1.37 pooka * Test if we want to pass the packet upwards
763 1.37 pooka */
764 1.1 pooka eth = mtod(m, struct ether_header *);
765 1.63 ozaki if (sp.sp_sender == sc->sc_uuid) {
766 1.63 ozaki passup = false;
767 1.63 ozaki } else if (memcmp(eth->ether_dhost, CLLADDR(ifp->if_sadl),
768 1.37 pooka ETHER_ADDR_LEN) == 0) {
769 1.37 pooka passup = true;
770 1.46 pooka } else if (ETHER_IS_MULTICAST(eth->ether_dhost)) {
771 1.37 pooka passup = true;
772 1.37 pooka } else if (ifp->if_flags & IFF_PROMISC) {
773 1.37 pooka m->m_flags |= M_PROMISC;
774 1.37 pooka passup = true;
775 1.38 pooka } else {
776 1.38 pooka passup = false;
777 1.37 pooka }
778 1.37 pooka
779 1.37 pooka if (passup) {
780 1.68 ozaki int bound;
781 1.22 pooka KERNEL_LOCK(1, NULL);
782 1.66 ozaki /* Prevent LWP migrations between CPUs for psref(9) */
783 1.68 ozaki bound = curlwp_bind();
784 1.65 ozaki if_input(ifp, m);
785 1.68 ozaki curlwp_bindx(bound);
786 1.22 pooka KERNEL_UNLOCK_ONE(NULL);
787 1.1 pooka m = NULL;
788 1.1 pooka }
789 1.37 pooka /* else: reuse mbuf for a future packet */
790 1.1 pooka }
791 1.32 pooka m_freem(m);
792 1.32 pooka m = NULL;
793 1.32 pooka
794 1.32 pooka if (!sc->sc_dying)
795 1.32 pooka goto reup;
796 1.1 pooka
797 1.32 pooka kthread_exit(0);
798 1.1 pooka }
799