/*	$NetBSD: vio9p.c,v 1.6 2022/04/13 13:50:37 uwe Exp $	*/

3 /*
4 * Copyright (c) 2019 Internet Initiative Japan, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.6 2022/04/13 13:50:37 uwe Exp $");
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/condvar.h>
37 #include <sys/device.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40 #include <sys/module.h>
41 #include <sys/syslog.h>
42 #include <sys/select.h>
43 #include <sys/kmem.h>
44
45 #include <sys/file.h>
46 #include <sys/filedesc.h>
47 #include <sys/uio.h>
48
49 #include <dev/pci/virtioreg.h>
50 #include <dev/pci/virtiovar.h>
51
52 #include "ioconf.h"
53
//#define VIO9P_DEBUG 1
//#define VIO9P_DUMP 1
#ifdef VIO9P_DEBUG
/* Debug logging macro; compiles to nothing unless VIO9P_DEBUG is defined. */
#define DLOG(fmt, args...) \
	do { log(LOG_DEBUG, "%s: " fmt "\n", __func__, ##args); } while (0)
#else
#define DLOG(fmt, args...) __nothing
#endif

/* Device-specific feature bits */
#define VIO9P_F_MOUNT_TAG	(UINT64_C(1) << 0) /* mount tag specified */

/* Configuration registers (byte offsets into the device config space) */
#define VIO9P_CONFIG_TAG_LEN	0 /* 16bit */
#define VIO9P_CONFIG_TAG	2

/* Feature-bit description string (snprintb(3)-style), used at attach. */
#define VIO9P_FLAG_BITS \
	VIRTIO_COMMON_FLAG_BITS \
	"b\x00" "MOUNT_TAG\0"


// Must be the same as P9P_DEFREQLEN of usr.sbin/puffs/mount_9p/ninepuffs.h
#define VIO9P_MAX_REQLEN	(16 * 1024)
#define VIO9P_SEGSIZE		PAGE_SIZE
#define VIO9P_N_SEGMENTS	(VIO9P_MAX_REQLEN / VIO9P_SEGSIZE)

/* Longest mount tag we keep; a longer host-supplied tag is truncated. */
#define P9_MAX_TAG_LEN		16

CTASSERT((PAGE_SIZE) == (VIRTIO_PAGE_SIZE)); /* XXX */
83
/*
 * Per-instance driver state.  One request/reply exchange is in flight
 * at a time; sc_state tracks where in the exchange we are (see the
 * locking/state-machine notes following this struct).
 */
struct vio9p_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;	/* parent virtio device */
	struct virtqueue	sc_vq[1];	/* single request/reply queue */

	uint16_t		sc_taglen;	/* tag length, w/o terminator */
	uint8_t			sc_tag[P9_MAX_TAG_LEN + 1]; /* NUL-terminated */

	int			sc_flags;
#define VIO9P_INUSE		__BIT(0)	/* device is open (exclusive) */

	int			sc_state;
#define VIO9P_S_INIT		0		/* idle, ready for a request */
#define VIO9P_S_REQUESTING	1		/* request sent to host */
#define VIO9P_S_REPLIED		2		/* host posted the reply */
#define VIO9P_S_CONSUMING	3		/* client reading the reply */
	kcondvar_t		sc_wait;	/* reply-arrival wait channel */
	struct selinfo		sc_sel;		/* poll/kqueue listeners */
	kmutex_t		sc_lock;

	bus_dmamap_t		sc_dmamap_tx;
	bus_dmamap_t		sc_dmamap_rx;
	char			*sc_buf_tx;	/* request staging buffer */
	char			*sc_buf_rx;	/* reply staging buffer */
	size_t			sc_buf_rx_len;	/* valid bytes in sc_buf_rx */
	off_t			sc_buf_rx_offset; /* bytes consumed so far */
};
112
113 /*
114 * Locking notes:
115 * - sc_state, sc_wait and sc_sel are protected by sc_lock
116 *
117 * The state machine (sc_state):
118 * - INIT =(write from client)=> REQUESTING
119 * - REQUESTING =(reply from host)=> REPLIED
120 * - REPLIED =(read from client)=> CONSUMING
121 * - CONSUMING =(read completed(*))=> INIT
122 *
123 * (*) read may not finish by one read(2) request, then
124 * the state remains CONSUMING.
125 */
126
static int	vio9p_match(device_t, cfdata_t, void *);
static void	vio9p_attach(device_t, device_t, void *);
static void	vio9p_read_config(struct vio9p_softc *);
static int	vio9p_request_done(struct virtqueue *);

/* file operations behind the descriptor cloned by vio9p_dev_open() */
static int	vio9p_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	vio9p_write(struct file *, off_t *, struct uio *,
		    kauth_cred_t, int);
static int	vio9p_ioctl(struct file *, u_long, void *);
static int	vio9p_close(struct file *);
static int	vio9p_kqfilter(struct file *, struct knote *);

static const struct fileops vio9p_fileops = {
	.fo_name = "vio9p",
	.fo_read = vio9p_read,
	.fo_write = vio9p_write,
	.fo_ioctl = vio9p_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,	/* event delivery is via kqueue only */
	.fo_stat = fbadop_stat,
	.fo_close = vio9p_close,
	.fo_kqfilter = vio9p_kqfilter,
	.fo_restart = fnullop_restart,
};
152
static dev_type_open(vio9p_dev_open);

/*
 * Character device switch.  open(2) hands back a cloned descriptor
 * (fd_clone) whose I/O goes through vio9p_fileops, so every other
 * entry point here is a stub.
 */
const struct cdevsw vio9p_cdevsw = {
	.d_open = vio9p_dev_open,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};
168
169 static int
170 vio9p_dev_open(dev_t dev, int flag, int mode, struct lwp *l)
171 {
172 struct vio9p_softc *sc;
173 struct file *fp;
174 int error, fd;
175
176 sc = device_lookup_private(&vio9p_cd, minor(dev));
177 if (sc == NULL)
178 return ENXIO;
179
180 /* FIXME TOCTOU */
181 if (ISSET(sc->sc_flags, VIO9P_INUSE))
182 return EBUSY;
183
184 /* falloc() will fill in the descriptor for us. */
185 error = fd_allocfile(&fp, &fd);
186 if (error != 0)
187 return error;
188
189 sc->sc_flags |= VIO9P_INUSE;
190
191 return fd_clone(fp, fd, flag, &vio9p_fileops, sc);
192 }
193
194 static int
195 vio9p_ioctl(struct file *fp, u_long cmd, void *addr)
196 {
197 int error = 0;
198
199 switch (cmd) {
200 case FIONBIO:
201 break;
202 default:
203 error = EINVAL;
204 break;
205 }
206
207 return error;
208 }
209
210 static int
211 vio9p_read(struct file *fp, off_t *offp, struct uio *uio,
212 kauth_cred_t cred, int flags)
213 {
214 struct vio9p_softc *sc = fp->f_data;
215 struct virtio_softc *vsc = sc->sc_virtio;
216 struct virtqueue *vq = &sc->sc_vq[0];
217 int error, slot, len;
218
219 DLOG("enter");
220
221 mutex_enter(&sc->sc_lock);
222
223 if (sc->sc_state == VIO9P_S_INIT) {
224 DLOG("%s: not requested", device_xname(sc->sc_dev));
225 error = EAGAIN;
226 goto out;
227 }
228
229 if (sc->sc_state == VIO9P_S_CONSUMING) {
230 KASSERT(sc->sc_buf_rx_len > 0);
231 /* We already have some remaining, consume it. */
232 len = sc->sc_buf_rx_len - sc->sc_buf_rx_offset;
233 goto consume;
234 }
235
236 #if 0
237 if (uio->uio_resid != VIO9P_MAX_REQLEN)
238 return EINVAL;
239 #else
240 if (uio->uio_resid > VIO9P_MAX_REQLEN) {
241 error = EINVAL;
242 goto out;
243 }
244 #endif
245
246 error = 0;
247 while (sc->sc_state == VIO9P_S_REQUESTING) {
248 error = cv_timedwait_sig(&sc->sc_wait, &sc->sc_lock, hz);
249 if (error != 0)
250 break;
251 }
252 if (sc->sc_state == VIO9P_S_REPLIED)
253 sc->sc_state = VIO9P_S_CONSUMING;
254
255 if (error != 0)
256 goto out;
257
258 error = virtio_dequeue(vsc, vq, &slot, &len);
259 if (error != 0) {
260 log(LOG_ERR, "%s: virtio_dequeue failed: %d\n",
261 device_xname(sc->sc_dev), error);
262 goto out;
263 }
264 DLOG("len=%d", len);
265 sc->sc_buf_rx_len = len;
266 sc->sc_buf_rx_offset = 0;
267 bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0, VIO9P_MAX_REQLEN,
268 BUS_DMASYNC_POSTWRITE);
269 bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0, VIO9P_MAX_REQLEN,
270 BUS_DMASYNC_POSTREAD);
271 virtio_dequeue_commit(vsc, vq, slot);
272 #ifdef VIO9P_DUMP
273 int i;
274 log(LOG_DEBUG, "%s: buf: ", __func__);
275 for (i = 0; i < len; i++) {
276 log(LOG_DEBUG, "%c", (char)sc->sc_buf_rx[i]);
277 }
278 log(LOG_DEBUG, "\n");
279 #endif
280
281 consume:
282 DLOG("uio_resid=%lu", uio->uio_resid);
283 if (len < uio->uio_resid) {
284 error = EINVAL;
285 goto out;
286 }
287 len = uio->uio_resid;
288 error = uiomove(sc->sc_buf_rx + sc->sc_buf_rx_offset, len, uio);
289 if (error != 0)
290 goto out;
291
292 sc->sc_buf_rx_offset += len;
293 if (sc->sc_buf_rx_offset == sc->sc_buf_rx_len) {
294 sc->sc_buf_rx_len = 0;
295 sc->sc_buf_rx_offset = 0;
296
297 sc->sc_state = VIO9P_S_INIT;
298 selnotify(&sc->sc_sel, 0, 1);
299 }
300
301 out:
302 mutex_exit(&sc->sc_lock);
303 return error;
304 }
305
306 static int
307 vio9p_write(struct file *fp, off_t *offp, struct uio *uio,
308 kauth_cred_t cred, int flags)
309 {
310 struct vio9p_softc *sc = fp->f_data;
311 struct virtio_softc *vsc = sc->sc_virtio;
312 struct virtqueue *vq = &sc->sc_vq[0];
313 int error, slot;
314 size_t len;
315
316 DLOG("enter");
317
318 mutex_enter(&sc->sc_lock);
319
320 if (sc->sc_state != VIO9P_S_INIT) {
321 DLOG("already requesting");
322 error = EAGAIN;
323 goto out;
324 }
325
326 if (uio->uio_resid == 0) {
327 error = 0;
328 goto out;
329 }
330
331 if (uio->uio_resid > VIO9P_MAX_REQLEN) {
332 error = EINVAL;
333 goto out;
334 }
335
336 len = uio->uio_resid;
337 error = uiomove(sc->sc_buf_tx, len, uio);
338 if (error != 0)
339 goto out;
340
341 DLOG("len=%lu", len);
342 #ifdef VIO9P_DUMP
343 int i;
344 log(LOG_DEBUG, "%s: buf: ", __func__);
345 for (i = 0; i < len; i++) {
346 log(LOG_DEBUG, "%c", (char)sc->sc_buf_tx[i]);
347 }
348 log(LOG_DEBUG, "\n");
349 #endif
350
351 error = virtio_enqueue_prep(vsc, vq, &slot);
352 if (error != 0) {
353 log(LOG_ERR, "%s: virtio_enqueue_prep failed\n",
354 device_xname(sc->sc_dev));
355 goto out;
356 }
357 DLOG("slot=%d", slot);
358 error = virtio_enqueue_reserve(vsc, vq, slot,
359 sc->sc_dmamap_tx->dm_nsegs + sc->sc_dmamap_rx->dm_nsegs);
360 if (error != 0) {
361 log(LOG_ERR, "%s: virtio_enqueue_reserve failed\n",
362 device_xname(sc->sc_dev));
363 goto out;
364 }
365
366 /* Tx */
367 bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0,
368 len, BUS_DMASYNC_PREWRITE);
369 virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_tx, true);
370 /* Rx */
371 bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0,
372 VIO9P_MAX_REQLEN, BUS_DMASYNC_PREREAD);
373 virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_rx, false);
374 virtio_enqueue_commit(vsc, vq, slot, true);
375
376 sc->sc_state = VIO9P_S_REQUESTING;
377 out:
378 mutex_exit(&sc->sc_lock);
379 return error;
380 }
381
382 static int
383 vio9p_close(struct file *fp)
384 {
385 struct vio9p_softc *sc = fp->f_data;
386
387 KASSERT(ISSET(sc->sc_flags, VIO9P_INUSE));
388 sc->sc_flags &= ~VIO9P_INUSE;
389
390 return 0;
391 }
392
393 static void
394 filt_vio9p_detach(struct knote *kn)
395 {
396 struct vio9p_softc *sc = kn->kn_hook;
397
398 mutex_enter(&sc->sc_lock);
399 selremove_knote(&sc->sc_sel, kn);
400 mutex_exit(&sc->sc_lock);
401 }
402
403 static int
404 filt_vio9p_read(struct knote *kn, long hint)
405 {
406 struct vio9p_softc *sc = kn->kn_hook;
407 int rv;
408
409 kn->kn_data = sc->sc_buf_rx_len;
410 /* XXX need sc_lock? */
411 rv = (kn->kn_data > 0) || sc->sc_state != VIO9P_S_INIT;
412
413 return rv;
414 }
415
/* kqueue EVFILT_READ glue (attached via vio9p_kqfilter, never directly). */
static const struct filterops vio9p_read_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_read,
};
422
423 static int
424 filt_vio9p_write(struct knote *kn, long hint)
425 {
426 struct vio9p_softc *sc = kn->kn_hook;
427
428 /* XXX need sc_lock? */
429 return sc->sc_state == VIO9P_S_INIT;
430 }
431
/* kqueue EVFILT_WRITE glue (attached via vio9p_kqfilter, never directly). */
static const struct filterops vio9p_write_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_write,
};
438
439 static int
440 vio9p_kqfilter(struct file *fp, struct knote *kn)
441 {
442 struct vio9p_softc *sc = fp->f_data;
443
444 switch (kn->kn_filter) {
445 case EVFILT_READ:
446 kn->kn_fop = &vio9p_read_filtops;
447 break;
448
449 case EVFILT_WRITE:
450 kn->kn_fop = &vio9p_write_filtops;
451 break;
452
453 default:
454 log(LOG_ERR, "%s: kn_filter=%u\n", __func__, kn->kn_filter);
455 return EINVAL;
456 }
457
458 kn->kn_hook = sc;
459
460 mutex_enter(&sc->sc_lock);
461 selrecord_knote(&sc->sc_sel, kn);
462 mutex_exit(&sc->sc_lock);
463
464 return 0;
465 }
466
/* Autoconf glue: attach vio9p as a child of the virtio bus. */
CFATTACH_DECL_NEW(vio9p, sizeof(struct vio9p_softc),
    vio9p_match, vio9p_attach, NULL, NULL);
469
470 static int
471 vio9p_match(device_t parent, cfdata_t match, void *aux)
472 {
473 struct virtio_attach_args *va = aux;
474
475 if (va->sc_childdevid == VIRTIO_DEVICE_ID_9P)
476 return 1;
477
478 return 0;
479 }
480
481 static void
482 vio9p_attach(device_t parent, device_t self, void *aux)
483 {
484 struct vio9p_softc *sc = device_private(self);
485 struct virtio_softc *vsc = device_private(parent);
486 uint64_t features;
487 int error;
488
489 if (virtio_child(vsc) != NULL) {
490 aprint_normal(": child already attached for %s; "
491 "something wrong...\n", device_xname(parent));
492 return;
493 }
494
495 sc->sc_dev = self;
496 sc->sc_virtio = vsc;
497
498 virtio_child_attach_start(vsc, self, IPL_VM, NULL,
499 NULL, virtio_vq_intr,
500 VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT, 0,
501 VIO9P_FLAG_BITS);
502
503 features = virtio_features(vsc);
504 if (features == 0)
505 goto err_none;
506
507 error = virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, VIO9P_MAX_REQLEN,
508 VIO9P_N_SEGMENTS * 2, "vio9p");
509 if (error != 0)
510 goto err_none;
511
512 sc->sc_vq[0].vq_done = vio9p_request_done;
513
514 virtio_child_attach_set_vqs(vsc, sc->sc_vq, 1);
515
516 sc->sc_buf_tx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);
517 sc->sc_buf_rx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);
518
519 error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
520 VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap_tx);
521 if (error != 0) {
522 aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
523 error);
524 goto err_vq;
525 }
526 error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
527 VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap_rx);
528 if (error != 0) {
529 aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
530 error);
531 goto err_vq;
532 }
533
534 error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_tx,
535 sc->sc_buf_tx, VIO9P_MAX_REQLEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
536 if (error != 0) {
537 aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
538 error);
539 goto err_dmamap;
540 }
541 error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_rx,
542 sc->sc_buf_rx, VIO9P_MAX_REQLEN, NULL, BUS_DMA_WAITOK | BUS_DMA_READ);
543 if (error != 0) {
544 aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
545 error);
546 goto err_dmamap;
547 }
548
549 sc->sc_state = VIO9P_S_INIT;
550 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
551 cv_init(&sc->sc_wait, "vio9p");
552
553 vio9p_read_config(sc);
554 aprint_normal_dev(self, "tagged as %s\n", sc->sc_tag);
555
556 error = virtio_child_attach_finish(vsc);
557 if (error != 0)
558 goto err_mutex;
559
560 return;
561
562 err_mutex:
563 cv_destroy(&sc->sc_wait);
564 mutex_destroy(&sc->sc_lock);
565 err_dmamap:
566 bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_tx);
567 bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_rx);
568 err_vq:
569 virtio_free_vq(vsc, &sc->sc_vq[0]);
570 err_none:
571 virtio_child_attach_failed(vsc);
572 return;
573 }
574
575 static void
576 vio9p_read_config(struct vio9p_softc *sc)
577 {
578 device_t dev = sc->sc_dev;
579 uint8_t reg;
580 int i;
581
582 /* these values are explicitly specified as little-endian */
583 sc->sc_taglen = virtio_read_device_config_le_2(sc->sc_virtio,
584 VIO9P_CONFIG_TAG_LEN);
585
586 if (sc->sc_taglen > P9_MAX_TAG_LEN) {
587 aprint_error_dev(dev, "warning: tag is trimmed from %u to %u\n",
588 sc->sc_taglen, P9_MAX_TAG_LEN);
589 sc->sc_taglen = P9_MAX_TAG_LEN;
590 }
591
592 for (i = 0; i < sc->sc_taglen; i++) {
593 reg = virtio_read_device_config_1(sc->sc_virtio,
594 VIO9P_CONFIG_TAG + i);
595 sc->sc_tag[i] = reg;
596 }
597 sc->sc_tag[i] = '\0';
598 }
599
600 static int
601 vio9p_request_done(struct virtqueue *vq)
602 {
603 struct virtio_softc *vsc = vq->vq_owner;
604 struct vio9p_softc *sc = device_private(virtio_child(vsc));
605
606 DLOG("enter");
607
608 mutex_enter(&sc->sc_lock);
609 sc->sc_state = VIO9P_S_REPLIED;
610 cv_broadcast(&sc->sc_wait);
611 selnotify(&sc->sc_sel, 0, 1);
612 mutex_exit(&sc->sc_lock);
613
614 return 1;
615 }
616
/* Loadable-module glue; depends on the "virtio" module being present. */
MODULE(MODULE_CLASS_DRIVER, vio9p, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif
622
623 static int
624 vio9p_modcmd(modcmd_t cmd, void *opaque)
625 {
626 #ifdef _MODULE
627 devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
628 #endif
629 int error = 0;
630
631 #ifdef _MODULE
632 switch (cmd) {
633 case MODULE_CMD_INIT:
634 devsw_attach(vio9p_cd.cd_name, NULL, &bmajor,
635 &vio9p_cdevsw, &cmajor);
636 error = config_init_component(cfdriver_ioconf_vio9p,
637 cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
638 break;
639 case MODULE_CMD_FINI:
640 error = config_fini_component(cfdriver_ioconf_vio9p,
641 cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
642 devsw_detach(NULL, &vio9p_cdevsw);
643 break;
644 default:
645 error = ENOTTY;
646 break;
647 }
648 #endif
649
650 return error;
651 }
652