/*	$NetBSD: vio9p.c,v 1.1 2019/10/28 02:56:40 ozaki-r Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.1 2019/10/28 02:56:40 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/kmem.h>

#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/uio.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include "ioconf.h"

//#define VIO9P_DEBUG 1
//#define VIO9P_DUMP 1
#ifdef VIO9P_DEBUG
#define DLOG(fmt, args...) \
	do { log(LOG_DEBUG, "%s: " fmt "\n", __func__, ##args); } while (0)
#else
#define DLOG(fmt, args...) __nothing
#endif

/* Configuration registers */
#define VIO9P_CONFIG_TAG_LEN	0 /* 16bit */
#define VIO9P_CONFIG_TAG	2

#define VIO9P_FLAG_BITS		VIRTIO_COMMON_FLAG_BITS

// Must be the same as P9P_DEFREQLEN of usr.sbin/puffs/mount_9p/ninepuffs.h
#define VIO9P_MAX_REQLEN	(16 * 1024)
#define VIO9P_SEGSIZE		PAGE_SIZE
#define VIO9P_N_SEGMENTS	(VIO9P_MAX_REQLEN / VIO9P_SEGSIZE)

#define P9_MAX_TAG_LEN		16

CTASSERT((PAGE_SIZE) == (VIRTIO_PAGE_SIZE));	/* XXX */

struct vio9p_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[1];

	uint16_t		sc_taglen;
	uint8_t			sc_tag[P9_MAX_TAG_LEN + 1];

	int			sc_flags;
#define VIO9P_INUSE		__BIT(0)

	int			sc_state;
#define VIO9P_S_INIT		0
#define VIO9P_S_REQUESTING	1
#define VIO9P_S_REPLIED		2
#define VIO9P_S_CONSUMING	3
	kcondvar_t		sc_wait;
	struct selinfo		sc_sel;
	kmutex_t		sc_lock;

	bus_dmamap_t		sc_dmamap_tx;
	bus_dmamap_t		sc_dmamap_rx;
	char			*sc_buf_tx;
	char			*sc_buf_rx;
	size_t			sc_buf_rx_len;
	off_t			sc_buf_rx_offset;
};

/*
 * Locking notes:
 * - sc_state, sc_wait and sc_sel are protected by sc_lock
 *
 * The state machine (sc_state):
 * - INIT       =(write from client)=> REQUESTING
 * - REQUESTING =(reply from host)=>   REPLIED
 * - REPLIED    =(read from client)=>  CONSUMING
 * - CONSUMING  =(read completed(*))=> INIT
 *
 * (*) a reply may not be consumed by a single read(2); in that case
 *     the state remains CONSUMING until the whole reply has been read.
 */

static int	vio9p_match(device_t, cfdata_t, void *);
static void	vio9p_attach(device_t, device_t, void *);
static void	vio9p_read_config(struct vio9p_softc *);
static int	vio9p_request_done(struct virtqueue *);

static int	vio9p_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	vio9p_write(struct file *, off_t *, struct uio *,
		    kauth_cred_t, int);
static int	vio9p_ioctl(struct file *, u_long, void *);
static int	vio9p_close(struct file *);
static int	vio9p_kqfilter(struct file *, struct knote *);

static const struct fileops vio9p_fileops = {
	.fo_name = "vio9p",
	.fo_read = vio9p_read,
	.fo_write = vio9p_write,
	.fo_ioctl = vio9p_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = vio9p_close,
	.fo_kqfilter = vio9p_kqfilter,
	.fo_restart = fnullop_restart,
};

static dev_type_open(vio9p_dev_open);

const struct cdevsw vio9p_cdevsw = {
	.d_open = vio9p_dev_open,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

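/*
 * Open the character device and hand the caller a cloned file descriptor
 * backed by vio9p_fileops.  Only one opener is allowed at a time; the
 * device is marked busy via VIO9P_INUSE until close.
 */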
static int
vio9p_dev_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct vio9p_softc *sc;
	struct file *fp;
	int error, fd;

	sc = device_lookup_private(&vio9p_cd, minor(dev));
	if (sc == NULL)
		return ENXIO;

	/* FIXME TOCTOU */
	if (ISSET(sc->sc_flags, VIO9P_INUSE))
		return EBUSY;

	/* falloc() will fill in the descriptor for us. */
	error = fd_allocfile(&fp, &fd);
	if (error != 0)
		return error;

	sc->sc_flags |= VIO9P_INUSE;

	return fd_clone(fp, fd, flag, &vio9p_fileops, sc);
}

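/*
 * Only FIONBIO is accepted (and is a no-op); any other command is
 * rejected with EINVAL.
 */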
static int
vio9p_ioctl(struct file *fp, u_long cmd, void *addr)
{
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

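/*
 * Read a 9P reply from the device.  If no request is outstanding, return
 * EAGAIN.  Otherwise wait for the host to fill the Rx buffer, dequeue it,
 * and copy it out to the caller.  A reply larger than the caller's buffer
 * is consumed over multiple read(2) calls (the CONSUMING state).
 */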
static int
vio9p_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot, len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state == VIO9P_S_INIT) {
		DLOG("%s: not requested", device_xname(sc->sc_dev));
		error = EAGAIN;
		goto out;
	}

	if (sc->sc_state == VIO9P_S_CONSUMING) {
		KASSERT(sc->sc_buf_rx_len > 0);
		/* We already have some remaining, consume it. */
		len = sc->sc_buf_rx_len - sc->sc_buf_rx_offset;
		goto consume;
	}

#if 0
	if (uio->uio_resid != VIO9P_MAX_REQLEN)
		return EINVAL;
#else
	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}
#endif

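	/* Wait for the host to post the reply for the outstanding request. */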
	error = 0;
	while (sc->sc_state == VIO9P_S_REQUESTING) {
		error = cv_timedwait_sig(&sc->sc_wait, &sc->sc_lock, hz);
		if (error != 0)
			break;
	}
	if (sc->sc_state == VIO9P_S_REPLIED)
		sc->sc_state = VIO9P_S_CONSUMING;

	if (error != 0)
		goto out;

	error = virtio_dequeue(vsc, vq, &slot, &len);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_dequeue failed: %d\n",
		    device_xname(sc->sc_dev), error);
		goto out;
	}
	DLOG("len=%d", len);
	sc->sc_buf_rx_len = len;
	sc->sc_buf_rx_offset = 0;
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTREAD);
	virtio_dequeue_commit(vsc, vq, slot);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_rx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

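	/*
	 * Copy out as much of the reply as the caller asked for.  Once the
	 * whole reply has been consumed, return to the INIT state and wake
	 * up any kevent/select waiters.
	 */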
consume:
	DLOG("uio_resid=%lu", uio->uio_resid);
	if (len < uio->uio_resid) {
		error = EINVAL;
		goto out;
	}
	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_rx + sc->sc_buf_rx_offset, len, uio);
	if (error != 0)
		goto out;

	sc->sc_buf_rx_offset += len;
	if (sc->sc_buf_rx_offset == sc->sc_buf_rx_len) {
		sc->sc_buf_rx_len = 0;
		sc->sc_buf_rx_offset = 0;

		sc->sc_state = VIO9P_S_INIT;
		selnotify(&sc->sc_sel, 0, 1);
	}

out:
	mutex_exit(&sc->sc_lock);
	return error;
}

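/*
 * Write a 9P request to the device.  The request is copied into the Tx
 * buffer and enqueued together with the Rx buffer in a single descriptor
 * chain, then the host is notified.  Only one request may be in flight;
 * a write while a request is outstanding fails with EAGAIN.
 */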
static int
vio9p_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot;
	size_t len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state != VIO9P_S_INIT) {
		DLOG("already requesting");
		error = EAGAIN;
		goto out;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}

	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}

	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_tx, len, uio);
	if (error != 0)
		goto out;

	DLOG("len=%lu", len);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_tx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

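	/*
	 * Build one descriptor chain carrying the request (device-readable)
	 * followed by the reply buffer (device-writable), then kick the host.
	 */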
	error = virtio_enqueue_prep(vsc, vq, &slot);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_prep failed\n",
		    device_xname(sc->sc_dev));
		goto out;
	}
	DLOG("slot=%d", slot);
	error = virtio_enqueue_reserve(vsc, vq, slot,
	    sc->sc_dmamap_tx->dm_nsegs + sc->sc_dmamap_rx->dm_nsegs);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_reserve failed\n",
		    device_xname(sc->sc_dev));
		goto out;
	}

	/* Tx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0,
	    len, BUS_DMASYNC_PREWRITE);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_tx, true);
	/* Rx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0,
	    VIO9P_MAX_REQLEN, BUS_DMASYNC_PREREAD);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_rx, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	sc->sc_state = VIO9P_S_REQUESTING;
out:
	mutex_exit(&sc->sc_lock);
	return error;
}

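/*
 * Release the device so that it can be opened again.
 */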
static int
vio9p_close(struct file *fp)
{
	struct vio9p_softc *sc = fp->f_data;

	KASSERT(ISSET(sc->sc_flags, VIO9P_INUSE));
	sc->sc_flags &= ~VIO9P_INUSE;

	return 0;
}

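/*
 * kqueue filters: EVFILT_READ reports ready whenever a request is in
 * flight or a reply remains to be consumed (kn_data is the number of
 * pending reply bytes); EVFILT_WRITE reports ready only while no request
 * is outstanding.
 */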
static void
filt_vio9p_detach(struct knote *kn)
{
	struct vio9p_softc *sc = kn->kn_hook;

	mutex_enter(&sc->sc_lock);
	SLIST_REMOVE(&sc->sc_sel.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&sc->sc_lock);
}

static int
filt_vio9p_read(struct knote *kn, long hint)
{
	struct vio9p_softc *sc = kn->kn_hook;
	int rv;

	kn->kn_data = sc->sc_buf_rx_len;
	/* XXX need sc_lock? */
	rv = (kn->kn_data > 0) || sc->sc_state != VIO9P_S_INIT;

	return rv;
}

static const struct filterops vio9p_read_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_read,
};

static int
filt_vio9p_write(struct knote *kn, long hint)
{
	struct vio9p_softc *sc = kn->kn_hook;

	/* XXX need sc_lock? */
	return sc->sc_state == VIO9P_S_INIT;
}

static const struct filterops vio9p_write_filtops = {
	.f_isfd = 1,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_write,
};

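/*
 * Attach a knote to the device's klist for EVFILT_READ or EVFILT_WRITE.
 */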
static int
vio9p_kqfilter(struct file *fp, struct knote *kn)
{
	struct vio9p_softc *sc = fp->f_data;
	struct klist *klist;

	mutex_enter(&sc->sc_lock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &sc->sc_sel.sel_klist;
		kn->kn_fop = &vio9p_read_filtops;
		break;

	case EVFILT_WRITE:
		klist = &sc->sc_sel.sel_klist;
		kn->kn_fop = &vio9p_write_filtops;
		break;

	default:
		mutex_exit(&sc->sc_lock);
		log(LOG_ERR, "%s: kn_filter=%u\n", __func__, kn->kn_filter);
		return EINVAL;
	}

	kn->kn_hook = sc;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	mutex_exit(&sc->sc_lock);

	return 0;
}

CFATTACH_DECL_NEW(vio9p, sizeof(struct vio9p_softc),
    vio9p_match, vio9p_attach, NULL, NULL);

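/*
 * Match a virtio child device advertising the 9P transport device ID.
 */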
static int
vio9p_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_9P)
		return 1;

	return 0;
}

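/*
 * Attach: allocate one virtqueue and a pair of request/reply buffers of
 * VIO9P_MAX_REQLEN bytes each, create and load DMA maps for them, read
 * the mount tag from the device configuration space, and finish the
 * virtio child attachment.
 */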
static void
vio9p_attach(device_t parent, device_t self, void *aux)
{
	struct vio9p_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	int error;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
		    "something wrong...\n", device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	virtio_child_attach_start(vsc, self, IPL_VM, NULL,
	    NULL, virtio_vq_intr,
	    VIRTIO_F_PCI_INTR_MPSAFE | VIRTIO_F_PCI_INTR_SOFTINT, 0,
	    VIO9P_FLAG_BITS);

	error = virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS * 2, "vio9p");
	if (error != 0)
		goto err_none;

	sc->sc_vq[0].vq_done = vio9p_request_done;

	virtio_child_attach_set_vqs(vsc, sc->sc_vq, 1);

	sc->sc_buf_tx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);
	sc->sc_buf_rx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);

	error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK,
	    &sc->sc_dmamap_tx);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
		    error);
		goto err_vq;
	}
	error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK,
	    &sc->sc_dmamap_rx);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
		    error);
		goto err_vq;
	}

	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_tx,
	    sc->sc_buf_tx, VIO9P_MAX_REQLEN, NULL,
	    BUS_DMA_WAITOK | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
		    error);
		goto err_dmamap;
	}
	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_rx,
	    sc->sc_buf_rx, VIO9P_MAX_REQLEN, NULL,
	    BUS_DMA_WAITOK | BUS_DMA_READ);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
		    error);
		goto err_dmamap;
	}

	sc->sc_state = VIO9P_S_INIT;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_wait, "vio9p");

	vio9p_read_config(sc);
	aprint_normal_dev(self, "tagged as %s\n", sc->sc_tag);

	error = virtio_child_attach_finish(vsc);
	if (error != 0)
		goto err_mutex;

	return;

err_mutex:
	cv_destroy(&sc->sc_wait);
	mutex_destroy(&sc->sc_lock);
err_dmamap:
	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_tx);
	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_rx);
err_vq:
	virtio_free_vq(vsc, &sc->sc_vq[0]);
err_none:
	virtio_child_attach_failed(vsc);
	return;
}

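/*
 * Read the mount tag advertised by the device.  The tag length and the tag
 * itself live in the device-specific configuration space and are stored
 * little-endian; tags longer than P9_MAX_TAG_LEN are truncated.
 */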
static void
vio9p_read_config(struct vio9p_softc *sc)
{
	device_t dev = sc->sc_dev;
	uint16_t taglen;
	uint8_t reg;
	int i;

	/* these values are explicitly specified as little-endian */
	taglen = virtio_read_device_config_2(sc->sc_virtio,
	    VIO9P_CONFIG_TAG_LEN);
	sc->sc_taglen = le16toh(taglen);

	if (sc->sc_taglen > P9_MAX_TAG_LEN) {
		aprint_error_dev(dev, "warning: tag is trimmed from %u to %u\n",
		    sc->sc_taglen, P9_MAX_TAG_LEN);
		sc->sc_taglen = P9_MAX_TAG_LEN;
	}

	for (i = 0; i < sc->sc_taglen; i++) {
		reg = virtio_read_device_config_1(sc->sc_virtio,
		    VIO9P_CONFIG_TAG + i);
		sc->sc_tag[i] = reg;
	}
	sc->sc_tag[i] = '\0';
}

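/*
 * Virtqueue completion handler: the host has filled the Rx buffer, so move
 * to the REPLIED state and wake up readers and kevent/select waiters.
 */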
static int
vio9p_request_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vio9p_softc *sc = device_private(virtio_child(vsc));

	DLOG("enter");

	mutex_enter(&sc->sc_lock);
	sc->sc_state = VIO9P_S_REPLIED;
	cv_broadcast(&sc->sc_wait);
	selnotify(&sc->sc_sel, 0, 1);
	mutex_exit(&sc->sc_lock);

	return 1;
}

MODULE(MODULE_CLASS_DRIVER, vio9p, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

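/*
 * Module control: on init, register the autoconf glue and attach the
 * character device switch; on fini, unregister the autoconf glue.
 */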
static int
vio9p_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_vio9p,
		    cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
		devsw_attach(vio9p_cd.cd_name, NULL, &bmajor,
		    &vio9p_cdevsw, &cmajor);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vio9p,
		    cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}