vio9p.c revision 1.5 1 /* $NetBSD: vio9p.c,v 1.5 2022/03/31 19:30:16 pgoyette Exp $ */
2
3 /*
4 * Copyright (c) 2019 Internet Initiative Japan, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.5 2022/03/31 19:30:16 pgoyette Exp $");
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/condvar.h>
37 #include <sys/device.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40 #include <sys/module.h>
41 #include <sys/syslog.h>
42 #include <sys/select.h>
43 #include <sys/kmem.h>
44
45 #include <sys/file.h>
46 #include <sys/filedesc.h>
47 #include <sys/uio.h>
48
49 #include <dev/pci/virtioreg.h>
50 #include <dev/pci/virtiovar.h>
51
52 #include "ioconf.h"
53
//#define VIO9P_DEBUG 1
//#define VIO9P_DUMP 1
#ifdef VIO9P_DEBUG
#define DLOG(fmt, args...) \
	do { log(LOG_DEBUG, "%s: " fmt "\n", __func__, ##args); } while (0)
#else
#define DLOG(fmt, args...) __nothing
#endif

/* Configuration registers (offsets into the virtio config space) */
#define VIO9P_CONFIG_TAG_LEN	0 /* 16bit */
#define VIO9P_CONFIG_TAG	2

#define VIO9P_FLAG_BITS		VIRTIO_COMMON_FLAG_BITS

/*
 * Size of one 9P request/reply bounce buffer.
 * Must be the same as P9P_DEFREQLEN of usr.sbin/puffs/mount_9p/ninepuffs.h
 */
#define VIO9P_MAX_REQLEN	(16 * 1024)
#define VIO9P_SEGSIZE		PAGE_SIZE
#define VIO9P_N_SEGMENTS	(VIO9P_MAX_REQLEN / VIO9P_SEGSIZE)

/* Maximum accepted length of the mount tag from the config space. */
#define P9_MAX_TAG_LEN	16

CTASSERT((PAGE_SIZE) == (VIRTIO_PAGE_SIZE)); /* XXX */
77
/*
 * Per-instance driver state; one instance per virtio 9p transport device.
 */
struct vio9p_softc {
	device_t sc_dev;			/* autoconf device handle */

	struct virtio_softc *sc_virtio;		/* parent virtio device */
	struct virtqueue sc_vq[1];		/* single request/reply queue */

	uint16_t sc_taglen;			/* tag length (without NUL) */
	uint8_t sc_tag[P9_MAX_TAG_LEN + 1];	/* mount tag, NUL-terminated */

	int sc_flags;
#define VIO9P_INUSE	__BIT(0)		/* device held by an open(2) */

	int sc_state;				/* see state machine below */
#define VIO9P_S_INIT		0
#define VIO9P_S_REQUESTING	1
#define VIO9P_S_REPLIED		2
#define VIO9P_S_CONSUMING	3
	kcondvar_t sc_wait;			/* signalled on host reply */
	struct selinfo sc_sel;			/* poll/kqueue listeners */
	kmutex_t sc_lock;

	bus_dmamap_t sc_dmamap_tx;		/* maps sc_buf_tx */
	bus_dmamap_t sc_dmamap_rx;		/* maps sc_buf_rx */
	char *sc_buf_tx;			/* request buffer (to host) */
	char *sc_buf_rx;			/* reply buffer (from host) */
	size_t sc_buf_rx_len;			/* valid bytes in sc_buf_rx */
	off_t sc_buf_rx_offset;			/* bytes already consumed */
};
106
107 /*
108 * Locking notes:
109 * - sc_state, sc_wait and sc_sel are protected by sc_lock
110 *
111 * The state machine (sc_state):
112 * - INIT =(write from client)=> REQUESTING
113 * - REQUESTING =(reply from host)=> REPLIED
114 * - REPLIED =(read from client)=> CONSUMING
115 * - CONSUMING =(read completed(*))=> INIT
116 *
 * (*) a read may not be completed by a single read(2) call, in which
 *     case the state remains CONSUMING.
119 */
120
/* Autoconf glue and internal helpers. */
static int vio9p_match(device_t, cfdata_t, void *);
static void vio9p_attach(device_t, device_t, void *);
static void vio9p_read_config(struct vio9p_softc *);
static int vio9p_request_done(struct virtqueue *);

/* fileops methods backing the descriptor cloned by vio9p_dev_open(). */
static int vio9p_read(struct file *, off_t *, struct uio *, kauth_cred_t,
    int);
static int vio9p_write(struct file *, off_t *, struct uio *,
    kauth_cred_t, int);
static int vio9p_ioctl(struct file *, u_long, void *);
static int vio9p_close(struct file *);
static int vio9p_kqfilter(struct file *, struct knote *);
133
/*
 * Operations on the per-open file returned by fd_clone().  poll is
 * stubbed out (fnullop_poll); event notification goes through
 * vio9p_kqfilter instead.
 */
static const struct fileops vio9p_fileops = {
	.fo_name = "vio9p",
	.fo_read = vio9p_read,
	.fo_write = vio9p_write,
	.fo_ioctl = vio9p_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = vio9p_close,
	.fo_kqfilter = vio9p_kqfilter,
	.fo_restart = fnullop_restart,
};
146
static dev_type_open(vio9p_dev_open);

/*
 * Character device switch: only open is implemented; all subsequent
 * I/O happens through the cloned descriptor (vio9p_fileops).
 */
const struct cdevsw vio9p_cdevsw = {
	.d_open = vio9p_dev_open,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};
162
163 static int
164 vio9p_dev_open(dev_t dev, int flag, int mode, struct lwp *l)
165 {
166 struct vio9p_softc *sc;
167 struct file *fp;
168 int error, fd;
169
170 sc = device_lookup_private(&vio9p_cd, minor(dev));
171 if (sc == NULL)
172 return ENXIO;
173
174 /* FIXME TOCTOU */
175 if (ISSET(sc->sc_flags, VIO9P_INUSE))
176 return EBUSY;
177
178 /* falloc() will fill in the descriptor for us. */
179 error = fd_allocfile(&fp, &fd);
180 if (error != 0)
181 return error;
182
183 sc->sc_flags |= VIO9P_INUSE;
184
185 return fd_clone(fp, fd, flag, &vio9p_fileops, sc);
186 }
187
188 static int
189 vio9p_ioctl(struct file *fp, u_long cmd, void *addr)
190 {
191 int error = 0;
192
193 switch (cmd) {
194 case FIONBIO:
195 break;
196 default:
197 error = EINVAL;
198 break;
199 }
200
201 return error;
202 }
203
/*
 * read(2) on the cloned descriptor: return the 9P reply for the
 * request previously submitted via vio9p_write().
 *
 * Waits (interruptibly, re-checking every hz ticks) until the host
 * replies.  A reply may be consumed over multiple read(2) calls; the
 * CONSUMING state tracks the unread remainder.  Returns EAGAIN when
 * no request is outstanding, and EINVAL when the caller asks for more
 * bytes than the (remaining) reply holds.
 */
static int
vio9p_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot, len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state == VIO9P_S_INIT) {
		/* No request in flight, so there is nothing to read. */
		DLOG("%s: not requested", device_xname(sc->sc_dev));
		error = EAGAIN;
		goto out;
	}

	if (sc->sc_state == VIO9P_S_CONSUMING) {
		KASSERT(sc->sc_buf_rx_len > 0);
		/* We already have some remaining, consume it. */
		len = sc->sc_buf_rx_len - sc->sc_buf_rx_offset;
		goto consume;
	}

#if 0
	if (uio->uio_resid != VIO9P_MAX_REQLEN)
		return EINVAL;
#else
	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}
#endif

	/*
	 * Wait for vio9p_request_done() to flip the state to REPLIED.
	 * cv_timedwait_sig() returns non-zero on signal or timeout;
	 * on timeout the loop simply re-checks the state.
	 */
	error = 0;
	while (sc->sc_state == VIO9P_S_REQUESTING) {
		error = cv_timedwait_sig(&sc->sc_wait, &sc->sc_lock, hz);
		if (error != 0)
			break;
	}
	if (sc->sc_state == VIO9P_S_REPLIED)
		sc->sc_state = VIO9P_S_CONSUMING;

	if (error != 0)
		goto out;

	/* Collect the used slot; len is the reply size from the host. */
	error = virtio_dequeue(vsc, vq, &slot, &len);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_dequeue failed: %d\n",
		    device_xname(sc->sc_dev), error);
		goto out;
	}
	DLOG("len=%d", len);
	sc->sc_buf_rx_len = len;
	sc->sc_buf_rx_offset = 0;
	/* The request/reply round-trip is finished; sync both maps. */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTREAD);
	virtio_dequeue_commit(vsc, vq, slot);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_rx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

consume:
	DLOG("uio_resid=%lu", uio->uio_resid);
	/* Refuse to hand out more than what remains of the reply. */
	if (len < uio->uio_resid) {
		error = EINVAL;
		goto out;
	}
	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_rx + sc->sc_buf_rx_offset, len, uio);
	if (error != 0)
		goto out;

	sc->sc_buf_rx_offset += len;
	if (sc->sc_buf_rx_offset == sc->sc_buf_rx_len) {
		/* Whole reply consumed: ready for the next request. */
		sc->sc_buf_rx_len = 0;
		sc->sc_buf_rx_offset = 0;

		sc->sc_state = VIO9P_S_INIT;
		selnotify(&sc->sc_sel, 0, 1);
	}

out:
	mutex_exit(&sc->sc_lock);
	return error;
}
299
/*
 * write(2) on the cloned descriptor: submit one complete 9P request.
 *
 * The whole request must arrive in a single write of at most
 * VIO9P_MAX_REQLEN bytes.  The TX buffer (request) and RX buffer
 * (room for the reply) are enqueued in one virtqueue slot and the
 * host is notified; the reply is later collected by vio9p_read().
 * Returns EAGAIN while a previous request is still outstanding.
 */
static int
vio9p_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot;
	size_t len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state != VIO9P_S_INIT) {
		/* Only one request may be in flight at a time. */
		DLOG("already requesting");
		error = EAGAIN;
		goto out;
	}

	if (uio->uio_resid == 0) {
		/* Empty write: nothing to submit. */
		error = 0;
		goto out;
	}

	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}

	/* Copy the request into the DMA-able bounce buffer. */
	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_tx, len, uio);
	if (error != 0)
		goto out;

	DLOG("len=%lu", len);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_tx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

	error = virtio_enqueue_prep(vsc, vq, &slot);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_prep failed\n",
		    device_xname(sc->sc_dev));
		goto out;
	}
	DLOG("slot=%d", slot);
	/* Reserve descriptors for both the request and the reply. */
	error = virtio_enqueue_reserve(vsc, vq, slot,
	    sc->sc_dmamap_tx->dm_nsegs + sc->sc_dmamap_rx->dm_nsegs);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_reserve failed\n",
		    device_xname(sc->sc_dev));
		goto out;
	}

	/* Tx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0,
	    len, BUS_DMASYNC_PREWRITE);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_tx, true);
	/* Rx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0,
	    VIO9P_MAX_REQLEN, BUS_DMASYNC_PREREAD);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_rx, false);
	/* Commit and kick the host. */
	virtio_enqueue_commit(vsc, vq, slot, true);

	sc->sc_state = VIO9P_S_REQUESTING;
out:
	mutex_exit(&sc->sc_lock);
	return error;
}
375
376 static int
377 vio9p_close(struct file *fp)
378 {
379 struct vio9p_softc *sc = fp->f_data;
380
381 KASSERT(ISSET(sc->sc_flags, VIO9P_INUSE));
382 sc->sc_flags &= ~VIO9P_INUSE;
383
384 return 0;
385 }
386
/*
 * Detach a knote registered by vio9p_kqfilter(); sc_sel is protected
 * by sc_lock.
 */
static void
filt_vio9p_detach(struct knote *kn)
{
	struct vio9p_softc *sc = kn->kn_hook;

	mutex_enter(&sc->sc_lock);
	selremove_knote(&sc->sc_sel, kn);
	mutex_exit(&sc->sc_lock);
}
396
397 static int
398 filt_vio9p_read(struct knote *kn, long hint)
399 {
400 struct vio9p_softc *sc = kn->kn_hook;
401 int rv;
402
403 kn->kn_data = sc->sc_buf_rx_len;
404 /* XXX need sc_lock? */
405 rv = (kn->kn_data > 0) || sc->sc_state != VIO9P_S_INIT;
406
407 return rv;
408 }
409
/* kqueue filter for EVFILT_READ on the cloned descriptor. */
static const struct filterops vio9p_read_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_read,
};
416
417 static int
418 filt_vio9p_write(struct knote *kn, long hint)
419 {
420 struct vio9p_softc *sc = kn->kn_hook;
421
422 /* XXX need sc_lock? */
423 return sc->sc_state == VIO9P_S_INIT;
424 }
425
/* kqueue filter for EVFILT_WRITE on the cloned descriptor. */
static const struct filterops vio9p_write_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_write,
};
432
433 static int
434 vio9p_kqfilter(struct file *fp, struct knote *kn)
435 {
436 struct vio9p_softc *sc = fp->f_data;
437
438 switch (kn->kn_filter) {
439 case EVFILT_READ:
440 kn->kn_fop = &vio9p_read_filtops;
441 break;
442
443 case EVFILT_WRITE:
444 kn->kn_fop = &vio9p_write_filtops;
445 break;
446
447 default:
448 log(LOG_ERR, "%s: kn_filter=%u\n", __func__, kn->kn_filter);
449 return EINVAL;
450 }
451
452 kn->kn_hook = sc;
453
454 mutex_enter(&sc->sc_lock);
455 selrecord_knote(&sc->sc_sel, kn);
456 mutex_exit(&sc->sc_lock);
457
458 return 0;
459 }
460
/* Autoconf attachment of vio9p as a child of virtio. */
CFATTACH_DECL_NEW(vio9p, sizeof(struct vio9p_softc),
    vio9p_match, vio9p_attach, NULL, NULL);
463
464 static int
465 vio9p_match(device_t parent, cfdata_t match, void *aux)
466 {
467 struct virtio_attach_args *va = aux;
468
469 if (va->sc_childdevid == VIRTIO_DEVICE_ID_9P)
470 return 1;
471
472 return 0;
473 }
474
475 static void
476 vio9p_attach(device_t parent, device_t self, void *aux)
477 {
478 struct vio9p_softc *sc = device_private(self);
479 struct virtio_softc *vsc = device_private(parent);
480 uint64_t features;
481 int error;
482
483 if (virtio_child(vsc) != NULL) {
484 aprint_normal(": child already attached for %s; "
485 "something wrong...\n", device_xname(parent));
486 return;
487 }
488
489 sc->sc_dev = self;
490 sc->sc_virtio = vsc;
491
492 virtio_child_attach_start(vsc, self, IPL_VM, NULL,
493 NULL, virtio_vq_intr,
494 VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT, 0,
495 VIO9P_FLAG_BITS);
496
497 features = virtio_features(vsc);
498 if (features == 0)
499 goto err_none;
500
501 error = virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, VIO9P_MAX_REQLEN,
502 VIO9P_N_SEGMENTS * 2, "vio9p");
503 if (error != 0)
504 goto err_none;
505
506 sc->sc_vq[0].vq_done = vio9p_request_done;
507
508 virtio_child_attach_set_vqs(vsc, sc->sc_vq, 1);
509
510 sc->sc_buf_tx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);
511 sc->sc_buf_rx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);
512
513 error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
514 VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap_tx);
515 if (error != 0) {
516 aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
517 error);
518 goto err_vq;
519 }
520 error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
521 VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap_rx);
522 if (error != 0) {
523 aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
524 error);
525 goto err_vq;
526 }
527
528 error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_tx,
529 sc->sc_buf_tx, VIO9P_MAX_REQLEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
530 if (error != 0) {
531 aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
532 error);
533 goto err_dmamap;
534 }
535 error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_rx,
536 sc->sc_buf_rx, VIO9P_MAX_REQLEN, NULL, BUS_DMA_WAITOK | BUS_DMA_READ);
537 if (error != 0) {
538 aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
539 error);
540 goto err_dmamap;
541 }
542
543 sc->sc_state = VIO9P_S_INIT;
544 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
545 cv_init(&sc->sc_wait, "vio9p");
546
547 vio9p_read_config(sc);
548 aprint_normal_dev(self, "tagged as %s\n", sc->sc_tag);
549
550 error = virtio_child_attach_finish(vsc);
551 if (error != 0)
552 goto err_mutex;
553
554 return;
555
556 err_mutex:
557 cv_destroy(&sc->sc_wait);
558 mutex_destroy(&sc->sc_lock);
559 err_dmamap:
560 bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_tx);
561 bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_rx);
562 err_vq:
563 virtio_free_vq(vsc, &sc->sc_vq[0]);
564 err_none:
565 virtio_child_attach_failed(vsc);
566 return;
567 }
568
569 static void
570 vio9p_read_config(struct vio9p_softc *sc)
571 {
572 device_t dev = sc->sc_dev;
573 uint8_t reg;
574 int i;
575
576 /* these values are explicitly specified as little-endian */
577 sc->sc_taglen = virtio_read_device_config_le_2(sc->sc_virtio,
578 VIO9P_CONFIG_TAG_LEN);
579
580 if (sc->sc_taglen > P9_MAX_TAG_LEN) {
581 aprint_error_dev(dev, "warning: tag is trimmed from %u to %u\n",
582 sc->sc_taglen, P9_MAX_TAG_LEN);
583 sc->sc_taglen = P9_MAX_TAG_LEN;
584 }
585
586 for (i = 0; i < sc->sc_taglen; i++) {
587 reg = virtio_read_device_config_1(sc->sc_virtio,
588 VIO9P_CONFIG_TAG + i);
589 sc->sc_tag[i] = reg;
590 }
591 sc->sc_tag[i] = '\0';
592 }
593
/*
 * Virtqueue completion callback: the host has produced a reply for
 * the outstanding request.  Advance to REPLIED and wake both sleeping
 * readers (sc_wait) and poll/kqueue waiters (sc_sel).
 */
static int
vio9p_request_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vio9p_softc *sc = device_private(virtio_child(vsc));

	DLOG("enter");

	mutex_enter(&sc->sc_lock);
	sc->sc_state = VIO9P_S_REPLIED;
	cv_broadcast(&sc->sc_wait);
	selnotify(&sc->sc_sel, 0, 1);
	mutex_exit(&sc->sc_lock);

	/* Non-zero presumably tells virtio_vq_intr the event was handled. */
	return 1;
}
610
/* Loadable-module glue; the driver depends on the virtio module. */
MODULE(MODULE_CLASS_DRIVER, vio9p, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif
616
617 static int
618 vio9p_modcmd(modcmd_t cmd, void *opaque)
619 {
620 #ifdef _MODULE
621 devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
622 #endif
623 int error = 0;
624
625 #ifdef _MODULE
626 switch (cmd) {
627 case MODULE_CMD_INIT:
628 devsw_attach(vio9p_cd.cd_name, NULL, &bmajor,
629 &vio9p_cdevsw, &cmajor);
630 error = config_init_component(cfdriver_ioconf_vio9p,
631 cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
632 break;
633 case MODULE_CMD_FINI:
634 error = config_fini_component(cfdriver_ioconf_vio9p,
635 cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
636 devsw_detach(NULL, &vio9p_cdevsw);
637 break;
638 default:
639 error = ENOTTY;
640 break;
641 }
642 #endif
643
644 return error;
645 }
646