/*	$NetBSD: vio9p.c,v 1.12 2025/04/22 05:56:25 ozaki-r Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.12 2025/04/22 05:56:25 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/device.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/kmem.h>

#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/uio.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include "ioconf.h"

//#define VIO9P_DEBUG	1
//#define VIO9P_DUMP	1
#ifdef VIO9P_DEBUG
#define DLOG(fmt, args...) \
	do { log(LOG_DEBUG, "%s: " fmt "\n", __func__, ##args); } while (0)
#else
#define DLOG(fmt, args...) __nothing
#endif

/* Device-specific feature bits */
#define VIO9P_F_MOUNT_TAG	(UINT64_C(1) << 0) /* mount tag specified */

/* Configuration registers */
#define VIO9P_CONFIG_TAG_LEN	0 /* 16bit */
#define VIO9P_CONFIG_TAG	2

#define VIO9P_FLAG_BITS				\
	VIRTIO_COMMON_FLAG_BITS			\
	"b\x00" "MOUNT_TAG\0"

// Must be the same as P9P_DEFREQLEN in usr.sbin/puffs/mount_9p/ninepuffs.h
#define VIO9P_MAX_REQLEN	(16 * 1024)
#define VIO9P_SEGSIZE		PAGE_SIZE
#define VIO9P_N_SEGMENTS	(VIO9P_MAX_REQLEN / VIO9P_SEGSIZE)
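/*
 * Explanatory note: with a 4 KiB PAGE_SIZE (the common case) this works
 * out to 16 KiB / 4 KiB = 4 DMA segments per direction; the CTASSERT
 * below only checks that PAGE_SIZE and VIRTIO_PAGE_SIZE agree.
 */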

/*
 * QEMU defines this as 32 but includes the final zero byte into the
 * limit.  The code below counts the final zero byte separately, so
 * adjust this define to match.
 */
#define P9_MAX_TAG_LEN		31

CTASSERT((PAGE_SIZE) == (VIRTIO_PAGE_SIZE)); /* XXX */

struct vio9p_softc {
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[1];

	uint16_t		sc_taglen;
	uint8_t			sc_tag[P9_MAX_TAG_LEN + 1];

	int			sc_flags;
#define VIO9P_INUSE		__BIT(0)

	int			sc_state;
#define VIO9P_S_INIT		0
#define VIO9P_S_REQUESTING	1
#define VIO9P_S_REPLIED		2
#define VIO9P_S_CONSUMING	3
	kcondvar_t		sc_wait;
	struct selinfo		sc_sel;
	kmutex_t		sc_lock;

	bus_dmamap_t		sc_dmamap_tx;
	bus_dmamap_t		sc_dmamap_rx;
	char			*sc_buf_tx;
	char			*sc_buf_rx;
	size_t			sc_buf_rx_len;
	off_t			sc_buf_rx_offset;
};

/*
 * Locking notes:
 * - sc_state, sc_wait and sc_sel are protected by sc_lock
 *
 * The state machine (sc_state):
 * - INIT       =(write from client)=> REQUESTING
 * - REQUESTING =(reply from host)=>   REPLIED
 * - REPLIED    =(read from client)=>  CONSUMING
 * - CONSUMING  =(read completed(*))=> INIT
 *
 * (*) a reply may not be fully consumed by a single read(2) request;
 *     in that case the state remains CONSUMING.
 */
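/*
 * Illustrative sketch only (not part of the original code): a userland
 * client such as mount_9p is expected to drive the state machine with
 * one write(2) per 9P T-message followed by read(2) of the matching
 * R-message, roughly:
 *
 *	write(fd, tmsg, tlen);		INIT -> REQUESTING
 *	(host processes the request)	REQUESTING -> REPLIED
 *	read(fd, rmsg, rlen);		REPLIED -> CONSUMING -> INIT
 *
 * where fd comes from opening the vio9p device node.
 */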

static int	vio9p_match(device_t, cfdata_t, void *);
static void	vio9p_attach(device_t, device_t, void *);
static void	vio9p_read_config(struct vio9p_softc *);
static int	vio9p_request_done(struct virtqueue *);

static int	vio9p_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	vio9p_write(struct file *, off_t *, struct uio *,
		    kauth_cred_t, int);
static int	vio9p_ioctl(struct file *, u_long, void *);
static int	vio9p_close(struct file *);
static int	vio9p_kqfilter(struct file *, struct knote *);

static const struct fileops vio9p_fileops = {
	.fo_name = "vio9p",
	.fo_read = vio9p_read,
	.fo_write = vio9p_write,
	.fo_ioctl = vio9p_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = vio9p_close,
	.fo_kqfilter = vio9p_kqfilter,
	.fo_restart = fnullop_restart,
};

static dev_type_open(vio9p_dev_open);

const struct cdevsw vio9p_cdevsw = {
	.d_open = vio9p_dev_open,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE,
};

static int
vio9p_dev_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct vio9p_softc *sc;
	struct file *fp;
	int error, fd;

	sc = device_lookup_private(&vio9p_cd, minor(dev));
	if (sc == NULL)
		return ENXIO;

	/* FIXME TOCTOU */
	if (ISSET(sc->sc_flags, VIO9P_INUSE))
		return EBUSY;

	/* fd_allocfile() will fill in the descriptor for us. */
	error = fd_allocfile(&fp, &fd);
	if (error != 0)
		return error;

	sc->sc_flags |= VIO9P_INUSE;

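	/*
	 * fd_clone() associates sc with the new descriptor, so the
	 * file operations in vio9p_fileops get it back via fp->f_data.
	 */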
	return fd_clone(fp, fd, flag, &vio9p_fileops, sc);
}

static int
vio9p_ioctl(struct file *fp, u_long cmd, void *addr)
{
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

static int
vio9p_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot, len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state == VIO9P_S_INIT) {
		DLOG("%s: not requested", device_xname(sc->sc_dev));
		error = EAGAIN;
		goto out;
	}

	if (sc->sc_state == VIO9P_S_CONSUMING) {
		KASSERT(sc->sc_buf_rx_len > 0);
		/* We already have some remaining, consume it. */
		len = sc->sc_buf_rx_len - sc->sc_buf_rx_offset;
		goto consume;
	}

#if 0
	if (uio->uio_resid != VIO9P_MAX_REQLEN)
		return EINVAL;
#else
	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}
#endif

	error = 0;
	while (sc->sc_state == VIO9P_S_REQUESTING) {
		error = cv_timedwait_sig(&sc->sc_wait, &sc->sc_lock, hz);
		if (error != 0)
			break;
	}
	if (sc->sc_state == VIO9P_S_REPLIED)
		sc->sc_state = VIO9P_S_CONSUMING;

	if (error != 0)
		goto out;

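	/*
	 * The host has filled sc_buf_rx; take the used slot off the ring
	 * and sync both DMA maps before looking at the reply.
	 */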
	error = virtio_dequeue(vsc, vq, &slot, &len);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_dequeue failed: %d\n",
		       device_xname(sc->sc_dev), error);
		goto out;
	}
	DLOG("len=%d", len);
	sc->sc_buf_rx_len = len;
	sc->sc_buf_rx_offset = 0;
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0, VIO9P_MAX_REQLEN,
	    BUS_DMASYNC_POSTREAD);
	virtio_dequeue_commit(vsc, vq, slot);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_rx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

consume:
	DLOG("uio_resid=%zu", uio->uio_resid);
	if (len < uio->uio_resid) {
		error = EINVAL;
		goto out;
	}
	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_rx + sc->sc_buf_rx_offset, len, uio);
	if (error != 0)
		goto out;

	sc->sc_buf_rx_offset += len;
	if (sc->sc_buf_rx_offset == sc->sc_buf_rx_len) {
		sc->sc_buf_rx_len = 0;
		sc->sc_buf_rx_offset = 0;

		sc->sc_state = VIO9P_S_INIT;
		selnotify(&sc->sc_sel, 0, 1);
	}

out:
	mutex_exit(&sc->sc_lock);
	return error;
}

static int
vio9p_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct vio9p_softc *sc = fp->f_data;
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	int error, slot;
	size_t len;

	DLOG("enter");

	mutex_enter(&sc->sc_lock);

	if (sc->sc_state != VIO9P_S_INIT) {
		DLOG("already requesting");
		error = EAGAIN;
		goto out;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}

	if (uio->uio_resid > VIO9P_MAX_REQLEN) {
		error = EINVAL;
		goto out;
	}

	len = uio->uio_resid;
	error = uiomove(sc->sc_buf_tx, len, uio);
	if (error != 0)
		goto out;

	DLOG("len=%zu", len);
#ifdef VIO9P_DUMP
	int i;
	log(LOG_DEBUG, "%s: buf: ", __func__);
	for (i = 0; i < len; i++) {
		log(LOG_DEBUG, "%c", (char)sc->sc_buf_tx[i]);
	}
	log(LOG_DEBUG, "\n");
#endif

	error = virtio_enqueue_prep(vsc, vq, &slot);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_prep failed\n",
		       device_xname(sc->sc_dev));
		goto out;
	}
	DLOG("slot=%d", slot);
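	/*
	 * A single descriptor chain carries both the request buffer
	 * (sc_dmamap_tx, readable by the host) and the reply buffer
	 * (sc_dmamap_rx, writable by the host), hence the combined
	 * segment count reserved here.
	 */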
	error = virtio_enqueue_reserve(vsc, vq, slot,
	    sc->sc_dmamap_tx->dm_nsegs + sc->sc_dmamap_rx->dm_nsegs);
	if (error != 0) {
		log(LOG_ERR, "%s: virtio_enqueue_reserve failed\n",
		       device_xname(sc->sc_dev));
		goto out;
	}

	/* Tx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_tx, 0,
	    len, BUS_DMASYNC_PREWRITE);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_tx, true);
	/* Rx */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap_rx, 0,
	    VIO9P_MAX_REQLEN, BUS_DMASYNC_PREREAD);
	virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_rx, false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	sc->sc_state = VIO9P_S_REQUESTING;
out:
	mutex_exit(&sc->sc_lock);
	return error;
}

static int
vio9p_close(struct file *fp)
{
	struct vio9p_softc *sc = fp->f_data;

	KASSERT(ISSET(sc->sc_flags, VIO9P_INUSE));
	sc->sc_flags &= ~VIO9P_INUSE;

	return 0;
}

static void
filt_vio9p_detach(struct knote *kn)
{
	struct vio9p_softc *sc = kn->kn_hook;

	mutex_enter(&sc->sc_lock);
	selremove_knote(&sc->sc_sel, kn);
	mutex_exit(&sc->sc_lock);
}

static int
filt_vio9p_read(struct knote *kn, long hint)
{
	struct vio9p_softc *sc = kn->kn_hook;
	int rv;

	kn->kn_data = sc->sc_buf_rx_len;
	/* XXX need sc_lock? */
	rv = (kn->kn_data > 0) || sc->sc_state != VIO9P_S_INIT;

	return rv;
}

static const struct filterops vio9p_read_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_read,
};

static int
filt_vio9p_write(struct knote *kn, long hint)
{
	struct vio9p_softc *sc = kn->kn_hook;

	/* XXX need sc_lock? */
	return sc->sc_state == VIO9P_S_INIT;
}

static const struct filterops vio9p_write_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_vio9p_detach,
	.f_event = filt_vio9p_write,
};

static int
vio9p_kqfilter(struct file *fp, struct knote *kn)
{
	struct vio9p_softc *sc = fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vio9p_read_filtops;
		break;

	case EVFILT_WRITE:
		kn->kn_fop = &vio9p_write_filtops;
		break;

	default:
		log(LOG_ERR, "%s: kn_filter=%u\n", __func__, kn->kn_filter);
		return EINVAL;
	}

	kn->kn_hook = sc;

	mutex_enter(&sc->sc_lock);
	selrecord_knote(&sc->sc_sel, kn);
	mutex_exit(&sc->sc_lock);

	return 0;
}

CFATTACH_DECL_NEW(vio9p, sizeof(struct vio9p_softc),
    vio9p_match, vio9p_attach, NULL, NULL);

static int
vio9p_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == VIRTIO_DEVICE_ID_9P)
		return 1;

	return 0;
}

static void
vio9p_attach(device_t parent, device_t self, void *aux)
{
	struct vio9p_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	uint64_t features;
	int error;
	const struct sysctlnode *node;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something is wrong...\n", device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	virtio_child_attach_start(vsc, self, IPL_VM,
	    VIO9P_F_MOUNT_TAG, VIO9P_FLAG_BITS);

	features = virtio_features(vsc);
	if ((features & VIO9P_F_MOUNT_TAG) == 0)
		goto err_none;

	virtio_init_vq_vqdone(vsc, &sc->sc_vq[0], 0, vio9p_request_done);
	error = virtio_alloc_vq(vsc, &sc->sc_vq[0], VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS * 2, "vio9p");
	if (error != 0)
		goto err_none;

	sc->sc_buf_tx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);
	sc->sc_buf_rx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP);

	error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap_tx);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
		    error);
		goto err_vq;
	}
	error = bus_dmamap_create(virtio_dmat(vsc), VIO9P_MAX_REQLEN,
	    VIO9P_N_SEGMENTS, VIO9P_SEGSIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap_rx);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
		    error);
		goto err_vq;
	}

	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_tx,
	    sc->sc_buf_tx, VIO9P_MAX_REQLEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
		    error);
		goto err_dmamap;
	}
	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap_rx,
	    sc->sc_buf_rx, VIO9P_MAX_REQLEN, NULL, BUS_DMA_WAITOK | BUS_DMA_READ);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
		    error);
		goto err_dmamap;
	}

	sc->sc_state = VIO9P_S_INIT;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_wait, "vio9p");

	vio9p_read_config(sc);
	aprint_normal_dev(self, "tagged as %s\n", sc->sc_tag);

	sysctl_createv(NULL, 0, NULL, &node, 0, CTLTYPE_NODE,
	    "vio9p", SYSCTL_DESCR("VirtIO 9p toplevel"),
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, &node, 0, CTLTYPE_NODE,
	    device_xname(self), SYSCTL_DESCR("VirtIO 9p device"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL, 0, CTLTYPE_STRING,
	    "tag", SYSCTL_DESCR("VirtIO 9p tag value"),
	    NULL, 0, sc->sc_tag, 0,
	    CTL_CREATE, CTL_EOL);
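	/*
	 * The mount tag is thereby exported as hw.vio9p.<device>.tag
	 * (e.g. hw.vio9p.vio9p0.tag for the first instance), so a
	 * userland mount helper can look it up with sysctl(8).
	 */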

	error = virtio_child_attach_finish(vsc, sc->sc_vq,
	    __arraycount(sc->sc_vq), NULL,
	    VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT);
	if (error != 0)
		goto err_mutex;

	return;

err_mutex:
	cv_destroy(&sc->sc_wait);
	mutex_destroy(&sc->sc_lock);
err_dmamap:
	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_tx);
	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap_rx);
err_vq:
	virtio_free_vq(vsc, &sc->sc_vq[0]);
err_none:
	virtio_child_attach_failed(vsc);
	return;
}

static void
vio9p_read_config(struct vio9p_softc *sc)
{
	device_t dev = sc->sc_dev;
	uint8_t reg;
	int i;

	/* these values are explicitly specified as little-endian */
	sc->sc_taglen = virtio_read_device_config_le_2(sc->sc_virtio,
		VIO9P_CONFIG_TAG_LEN);

	if (sc->sc_taglen > P9_MAX_TAG_LEN) {
		aprint_error_dev(dev, "warning: tag is trimmed from %u to %u\n",
		    sc->sc_taglen, P9_MAX_TAG_LEN);
		sc->sc_taglen = P9_MAX_TAG_LEN;
	}

	for (i = 0; i < sc->sc_taglen; i++) {
		reg = virtio_read_device_config_1(sc->sc_virtio,
		    VIO9P_CONFIG_TAG + i);
		sc->sc_tag[i] = reg;
	}
	sc->sc_tag[i] = '\0';
}

static int
vio9p_request_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vio9p_softc *sc = device_private(virtio_child(vsc));

	DLOG("enter");

	mutex_enter(&sc->sc_lock);
	sc->sc_state = VIO9P_S_REPLIED;
	cv_broadcast(&sc->sc_wait);
	selnotify(&sc->sc_sel, 0, 1);
	mutex_exit(&sc->sc_lock);

	return 1;
}

MODULE(MODULE_CLASS_DRIVER, vio9p, "virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vio9p_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		devsw_attach(vio9p_cd.cd_name, NULL, &bmajor,
		    &vio9p_cdevsw, &cmajor);
		error = config_init_component(cfdriver_ioconf_vio9p,
		    cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_vio9p,
		    cfattach_ioconf_vio9p, cfdata_ioconf_vio9p);
		devsw_detach(NULL, &vio9p_cdevsw);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}
    668