/* ld_virtio.c revision 1.21 -- NetBSD virtio block device driver (dev/pci) */
      1 /*	$NetBSD: ld_virtio.c,v 1.21 2018/06/10 14:43:07 jakllsch Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2010 Minoura Makoto.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26  */
     27 
     28 #include <sys/cdefs.h>
     29 __KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.21 2018/06/10 14:43:07 jakllsch Exp $");
     30 
     31 #include <sys/param.h>
     32 #include <sys/systm.h>
     33 #include <sys/kernel.h>
     34 #include <sys/buf.h>
     35 #include <sys/bufq.h>
     36 #include <sys/bus.h>
     37 #include <sys/device.h>
     38 #include <sys/disk.h>
     39 #include <sys/mutex.h>
     40 #include <sys/module.h>
     41 
     42 #include <dev/pci/pcidevs.h>
     43 #include <dev/pci/pcireg.h>
     44 #include <dev/pci/pcivar.h>
     45 
     46 #include <dev/ldvar.h>
     47 #include <dev/pci/virtioreg.h>
     48 #include <dev/pci/virtiovar.h>
     49 
     50 #include "ioconf.h"
     51 
     52 /*
     53  * ld_virtioreg:
     54  */
     55 /* Configuration registers */
     56 #define VIRTIO_BLK_CONFIG_CAPACITY	0 /* 64bit */
     57 #define VIRTIO_BLK_CONFIG_SIZE_MAX	8 /* 32bit */
     58 #define VIRTIO_BLK_CONFIG_SEG_MAX	12 /* 32bit */
     59 #define VIRTIO_BLK_CONFIG_GEOMETRY_C	16 /* 16bit */
     60 #define VIRTIO_BLK_CONFIG_GEOMETRY_H	18 /* 8bit */
     61 #define VIRTIO_BLK_CONFIG_GEOMETRY_S	19 /* 8bit */
     62 #define VIRTIO_BLK_CONFIG_BLK_SIZE	20 /* 32bit */
     63 #define VIRTIO_BLK_CONFIG_WRITEBACK	32 /* 8bit */
     64 
     65 /* Feature bits */
     66 #define VIRTIO_BLK_F_BARRIER	(1<<0)
     67 #define VIRTIO_BLK_F_SIZE_MAX	(1<<1)
     68 #define VIRTIO_BLK_F_SEG_MAX	(1<<2)
     69 #define VIRTIO_BLK_F_GEOMETRY	(1<<4)
     70 #define VIRTIO_BLK_F_RO		(1<<5)
     71 #define VIRTIO_BLK_F_BLK_SIZE	(1<<6)
     72 #define VIRTIO_BLK_F_SCSI	(1<<7)
     73 #define VIRTIO_BLK_F_FLUSH	(1<<9)
     74 #define VIRTIO_BLK_F_TOPOLOGY	(1<<10)
     75 #define VIRTIO_BLK_F_CONFIG_WCE	(1<<11)
     76 
     77 /*
     78  * Each block request uses at least two segments - one for the header
     79  * and one for the status.
     80 */
     81 #define	VIRTIO_BLK_MIN_SEGMENTS	2
     82 
     83 #define VIRTIO_BLK_FLAG_BITS \
     84 	VIRTIO_COMMON_FLAG_BITS \
     85 	"\x0c""CONFIG_WCE" \
     86 	"\x0b""TOPOLOGY" \
     87 	"\x0a""FLUSH" \
     88 	"\x08""SCSI" \
     89 	"\x07""BLK_SIZE" \
     90 	"\x06""RO" \
     91 	"\x05""GEOMETRY" \
     92 	"\x03""SEG_MAX" \
     93 	"\x02""SIZE_MAX" \
     94 	"\x01""BARRIER"
     95 
     96 /* Command */
     97 #define VIRTIO_BLK_T_IN		0
     98 #define VIRTIO_BLK_T_OUT	1
     99 #define VIRTIO_BLK_T_FLUSH	4
    100 #define VIRTIO_BLK_T_BARRIER	0x80000000
    101 
    102 /* Sector */
    103 #define VIRTIO_BLK_BSIZE	512
    104 
    105 /* Status */
    106 #define VIRTIO_BLK_S_OK		0
    107 #define VIRTIO_BLK_S_IOERR	1
    108 #define VIRTIO_BLK_S_UNSUPP	2
    109 
/*
 * Request header structure -- this layout is defined by the virtio
 * block device specification and is read directly by the device, so
 * it must stay packed and keep this exact field order.
 */
struct virtio_blk_req_hdr {
	uint32_t	type;	/* VIRTIO_BLK_T_* request type */
	uint32_t	ioprio;	/* request priority (always 0 in this driver) */
	uint64_t	sector;	/* start sector, in 512-byte units */
} __packed;
/* payload and 1 byte status follows */
    117 
    118 
    119 /*
    120  * ld_virtiovar:
    121  */
/*
 * Per-slot request state.  vr_hdr and vr_status are device-visible
 * (both are covered by the vr_cmdsts DMA map); the remaining fields
 * are driver-private bookkeeping.
 */
struct virtio_blk_req {
	struct virtio_blk_req_hdr	vr_hdr;		/* device-readable header */
	uint8_t				vr_status;	/* device-written VIRTIO_BLK_S_* */
	struct buf			*vr_bp;		/* buf for this request, or DUMMY_VR_BP */
#define DUMMY_VR_BP				((void *)1)	/* marks internal (flush) requests */
	bus_dmamap_t			vr_cmdsts;	/* map covering vr_hdr + vr_status */
	bus_dmamap_t			vr_payload;	/* map covering the data transfer */
};
    130 
struct ld_virtio_softc {
	struct ld_softc		sc_ld;		/* generic ld(4) state */
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;	/* parent virtio controller */
	struct virtqueue	sc_vq;		/* the single request virtqueue */

	struct virtio_blk_req	*sc_reqs;	/* one request per virtqueue slot */
	bus_dma_segment_t	sc_reqs_seg;	/* backing DMA segment for sc_reqs */

	int			sc_readonly;	/* set when VIRTIO_BLK_F_RO negotiated */

	/*
	 * State machine for the single in-flight synchronous (flush)
	 * request; serializes callers of ld_virtio_flush().
	 */
	enum {
		SYNC_FREE, SYNC_BUSY, SYNC_DONE
	}			sc_sync_use;
	kcondvar_t		sc_sync_wait;	/* signalled on sync completion/free */
	kmutex_t		sc_sync_wait_lock;
	uint8_t			sc_sync_status;	/* status byte of last sync request */
};
    150 
    151 static int	ld_virtio_match(device_t, cfdata_t, void *);
    152 static void	ld_virtio_attach(device_t, device_t, void *);
    153 static int	ld_virtio_detach(device_t, int);
    154 
    155 CFATTACH_DECL_NEW(ld_virtio, sizeof(struct ld_virtio_softc),
    156     ld_virtio_match, ld_virtio_attach, ld_virtio_detach, NULL);
    157 
    158 static int
    159 ld_virtio_match(device_t parent, cfdata_t match, void *aux)
    160 {
    161 	struct virtio_attach_args *va = aux;
    162 
    163 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
    164 		return 1;
    165 
    166 	return 0;
    167 }
    168 
    169 static int ld_virtio_vq_done(struct virtqueue *);
    170 static int ld_virtio_dump(struct ld_softc *, void *, int, int);
    171 static int ld_virtio_start(struct ld_softc *, struct buf *);
    172 static int ld_virtio_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);
    173 
/*
 * Allocate and DMA-map the request array: one struct virtio_blk_req
 * per virtqueue slot.  Each request gets two DMA maps: vr_cmdsts
 * covering the device-visible header+status prefix of the structure,
 * and vr_payload for the data transfer itself.  Returns 0 on success,
 * -1 on failure (all partial allocations are rolled back).
 */
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	/* One physically contiguous segment holds the whole request array. */
	r = bus_dmamem_alloc(virtio_dmat(sc->sc_virtio), allocsize, 0, 0,
			     &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(virtio_dmat(sc->sc_virtio),
			   &sc->sc_reqs_seg, 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	/* Zeroed maps let the error path tell created maps from absent ones. */
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		/*
		 * vr_cmdsts spans the fields up to (not including) vr_bp,
		 * i.e. the packed header plus the one-byte status.
		 */
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
				      offsetof(struct virtio_blk_req, vr_bp),
				      1,
				      offsetof(struct virtio_blk_req, vr_bp),
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		/* Loaded once here; stays loaded for the device's lifetime. */
		r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), vr->vr_cmdsts,
				    &vr->vr_hdr,
				    offsetof(struct virtio_blk_req, vr_bp),
				    NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap load failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		/*
		 * Payload map: up to sc_maxxfer bytes in at most
		 * maxxfer/NBPG + VIRTIO_BLK_MIN_SEGMENTS segments.
		 */
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
				      ld->sc_maxxfer,
				      (ld->sc_maxxfer / NBPG) +
				      VIRTIO_BLK_MIN_SEGMENTS,
				      ld->sc_maxxfer,
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "payload dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
	}
	return 0;

err_reqs:
	/* Destroy whatever maps were created before the failure. */
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
					   vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
					   vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(virtio_dmat(sc->sc_virtio), sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(virtio_dmat(sc->sc_virtio), &sc->sc_reqs_seg, 1);
err_none:
	return -1;
}
    263 
    264 static void
    265 ld_virtio_attach(device_t parent, device_t self, void *aux)
    266 {
    267 	struct ld_virtio_softc *sc = device_private(self);
    268 	struct ld_softc *ld = &sc->sc_ld;
    269 	struct virtio_softc *vsc = device_private(parent);
    270 	uint32_t features;
    271 	int qsize, maxxfersize, maxnsegs;
    272 
    273 	if (virtio_child(vsc) != NULL) {
    274 		aprint_normal(": child already attached for %s; "
    275 			      "something wrong...\n", device_xname(parent));
    276 		return;
    277 	}
    278 
    279 	sc->sc_dev = self;
    280 	sc->sc_virtio = vsc;
    281 
    282 	virtio_child_attach_start(vsc, self, IPL_BIO, &sc->sc_vq,
    283 	    NULL, virtio_vq_intr, 0,
    284 	    (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
    285 	     VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE |
    286 	     VIRTIO_BLK_F_FLUSH | VIRTIO_BLK_F_CONFIG_WCE),
    287 	    VIRTIO_BLK_FLAG_BITS);
    288 
    289 	features = virtio_features(vsc);
    290 
    291 	if (features & VIRTIO_BLK_F_RO)
    292 		sc->sc_readonly = 1;
    293 	else
    294 		sc->sc_readonly = 0;
    295 
    296 	if (features & VIRTIO_BLK_F_BLK_SIZE) {
    297 		ld->sc_secsize = virtio_read_device_config_4(vsc,
    298 					VIRTIO_BLK_CONFIG_BLK_SIZE);
    299 	} else
    300 		ld->sc_secsize = VIRTIO_BLK_BSIZE;
    301 
    302 	/* At least genfs_io assumes maxxfer == MAXPHYS. */
    303 	if (features & VIRTIO_BLK_F_SIZE_MAX) {
    304 		maxxfersize = virtio_read_device_config_4(vsc,
    305 		    VIRTIO_BLK_CONFIG_SIZE_MAX);
    306 		if (maxxfersize < MAXPHYS) {
    307 			aprint_error_dev(sc->sc_dev,
    308 			    "Too small SIZE_MAX %dK minimum is %dK\n",
    309 			    maxxfersize / 1024, MAXPHYS / 1024);
    310 			// goto err;
    311 			maxxfersize = MAXPHYS;
    312 		} else if (maxxfersize > MAXPHYS) {
    313 			aprint_normal_dev(sc->sc_dev,
    314 			    "Clip SEG_MAX from %dK to %dK\n",
    315 			    maxxfersize / 1024,
    316 			    MAXPHYS / 1024);
    317 			maxxfersize = MAXPHYS;
    318 		}
    319 	} else
    320 		maxxfersize = MAXPHYS;
    321 
    322 	if (features & VIRTIO_BLK_F_SEG_MAX) {
    323 		maxnsegs = virtio_read_device_config_4(vsc,
    324 		    VIRTIO_BLK_CONFIG_SEG_MAX);
    325 		if (maxnsegs < VIRTIO_BLK_MIN_SEGMENTS) {
    326 			aprint_error_dev(sc->sc_dev,
    327 			    "Too small SEG_MAX %d minimum is %d\n",
    328 			    maxnsegs, VIRTIO_BLK_MIN_SEGMENTS);
    329 			maxnsegs = maxxfersize / NBPG;
    330 			// goto err;
    331 		}
    332 	} else
    333 		maxnsegs = maxxfersize / NBPG;
    334 
    335 	/* 2 for the minimum size */
    336 	maxnsegs += VIRTIO_BLK_MIN_SEGMENTS;
    337 
    338 	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, maxxfersize, maxnsegs,
    339 	    "I/O request") != 0) {
    340 		goto err;
    341 	}
    342 	qsize = sc->sc_vq.vq_num;
    343 	sc->sc_vq.vq_done = ld_virtio_vq_done;
    344 
    345 	if (virtio_child_attach_finish(vsc) != 0)
    346 		goto err;
    347 
    348 	ld->sc_dv = self;
    349 	ld->sc_secperunit = virtio_read_device_config_8(vsc,
    350 	    VIRTIO_BLK_CONFIG_CAPACITY) / (ld->sc_secsize / VIRTIO_BLK_BSIZE);
    351 	ld->sc_maxxfer = maxxfersize;
    352 	if (features & VIRTIO_BLK_F_GEOMETRY) {
    353 		ld->sc_ncylinders = virtio_read_device_config_2(vsc,
    354 					VIRTIO_BLK_CONFIG_GEOMETRY_C);
    355 		ld->sc_nheads     = virtio_read_device_config_1(vsc,
    356 					VIRTIO_BLK_CONFIG_GEOMETRY_H);
    357 		ld->sc_nsectors   = virtio_read_device_config_1(vsc,
    358 					VIRTIO_BLK_CONFIG_GEOMETRY_S);
    359 	}
    360 	ld->sc_maxqueuecnt = qsize - 1; /* reserve slot for dumps, flushes */
    361 
    362 	if (ld_virtio_alloc_reqs(sc, qsize) < 0)
    363 		goto err;
    364 
    365 	cv_init(&sc->sc_sync_wait, "vblksync");
    366 	mutex_init(&sc->sc_sync_wait_lock, MUTEX_DEFAULT, IPL_BIO);
    367 	sc->sc_sync_use = SYNC_FREE;
    368 
    369 	ld->sc_dump = ld_virtio_dump;
    370 	ld->sc_start = ld_virtio_start;
    371 	ld->sc_ioctl = ld_virtio_ioctl;
    372 
    373 	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
    374 	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
    375 
    376 	return;
    377 
    378 err:
    379 	virtio_child_attach_failed(vsc);
    380 	return;
    381 }
    382 
/*
 * Strategy callback from the ld(4) layer: translate one struct buf
 * into a three-part virtio request (device-readable header, payload
 * segments, device-writable status byte) and post it to the queue.
 * Runs at IPL_BIO.  Returns 0 on success or an errno; completion is
 * reported asynchronously through ld_virtio_vq_done().
 */
static int
ld_virtio_start(struct ld_softc *ld, struct buf *bp)
{
	/* splbio */
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	int r;
	int isread = (bp->b_flags & B_READ);
	int slot;

	if (sc->sc_readonly && !isread)
		return EIO;

	/* Grab a free virtqueue slot; also picks our request struct. */
	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		return r;

	vr = &sc->sc_reqs[slot];
	KASSERT(vr->vr_bp == NULL);

	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
			    bp->b_data, bp->b_bcount, NULL,
			    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
			     |BUS_DMA_NOWAIT));
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "payload dmamap failed, error code %d\n", r);
		virtio_enqueue_abort(vsc, vq, slot);
		return r;
	}

	/* Reserve payload segments plus header and status descriptors. */
	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
	    VIRTIO_BLK_MIN_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
		return r;
	}

	vr->vr_bp = bp;
	vr->vr_hdr.type = isread?VIRTIO_BLK_T_IN:VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	/* virtio sectors are always 512 bytes, independent of sc_secsize */
	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize /
	    VIRTIO_BLK_BSIZE;

	/* Sync header (to device), payload, and status byte (from device). */
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
			0, bp->b_bcount,
			isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
			offsetof(struct virtio_blk_req, vr_status),
			sizeof(uint8_t),
			BUS_DMASYNC_PREREAD);

	/* Descriptor order matters: header, payload, then status. */
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 0, sizeof(struct virtio_blk_req_hdr),
			 true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 offsetof(struct virtio_blk_req, vr_status),
			 sizeof(uint8_t),
			 false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
    452 
/*
 * Finish the request occupying the given virtqueue slot: sync its DMA
 * state, release the slot, and report completion.  Requests marked
 * with DUMMY_VR_BP are internal flushes; they complete through the
 * sync state machine instead of lddone().
 */
static void
ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc,
		   struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct buf *bp = vr->vr_bp;

	vr->vr_bp = NULL;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_POSTWRITE);
	/* the status byte immediately follows the packed header */
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
			sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
			BUS_DMASYNC_POSTREAD);
	if (bp == DUMMY_VR_BP) {
		/* Internal flush: record status and wake its waiter. */
		mutex_enter(&sc->sc_sync_wait_lock);
		sc->sc_sync_status = vr->vr_status;
		sc->sc_sync_use = SYNC_DONE;
		cv_signal(&sc->sc_sync_wait);
		mutex_exit(&sc->sc_sync_wait_lock);
		virtio_dequeue_commit(vsc, vq, slot);
		return;
	}
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
			0, bp->b_bcount,
			(bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
					      :BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);

	/* Map the device's status byte onto the buf's error state. */
	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_error = 0;
		bp->b_resid = 0;
	}

	virtio_dequeue_commit(vsc, vq, slot);

	lddone(&sc->sc_ld, bp);
}
    495 
    496 static int
    497 ld_virtio_vq_done(struct virtqueue *vq)
    498 {
    499 	struct virtio_softc *vsc = vq->vq_owner;
    500 	struct ld_virtio_softc *sc = device_private(virtio_child(vsc));
    501 	int r = 0;
    502 	int slot;
    503 
    504 again:
    505 	if (virtio_dequeue(vsc, vq, &slot, NULL))
    506 		return r;
    507 	r = 1;
    508 
    509 	ld_virtio_vq_done1(sc, vsc, vq, slot);
    510 	goto again;
    511 }
    512 
    513 static int
    514 ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
    515 {
    516 	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
    517 	struct virtio_softc *vsc = sc->sc_virtio;
    518 	struct virtqueue *vq = &sc->sc_vq;
    519 	struct virtio_blk_req *vr;
    520 	int slot, r;
    521 
    522 	if (sc->sc_readonly)
    523 		return EIO;
    524 
    525 	r = virtio_enqueue_prep(vsc, vq, &slot);
    526 	if (r != 0) {
    527 		if (r == EAGAIN) { /* no free slot; dequeue first */
    528 			delay(100);
    529 			ld_virtio_vq_done(vq);
    530 			r = virtio_enqueue_prep(vsc, vq, &slot);
    531 			if (r != 0)
    532 				return r;
    533 		}
    534 		return r;
    535 	}
    536 	vr = &sc->sc_reqs[slot];
    537 	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
    538 			    data, blkcnt*ld->sc_secsize, NULL,
    539 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
    540 	if (r != 0)
    541 		return r;
    542 
    543 	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
    544 	    VIRTIO_BLK_MIN_SEGMENTS);
    545 	if (r != 0) {
    546 		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
    547 		return r;
    548 	}
    549 
    550 	vr->vr_bp = (void*)0xdeadbeef;
    551 	vr->vr_hdr.type = VIRTIO_BLK_T_OUT;
    552 	vr->vr_hdr.ioprio = 0;
    553 	vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize /
    554 	    VIRTIO_BLK_BSIZE;
    555 
    556 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
    557 			0, sizeof(struct virtio_blk_req_hdr),
    558 			BUS_DMASYNC_PREWRITE);
    559 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
    560 			0, blkcnt*ld->sc_secsize,
    561 			BUS_DMASYNC_PREWRITE);
    562 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
    563 			offsetof(struct virtio_blk_req, vr_status),
    564 			sizeof(uint8_t),
    565 			BUS_DMASYNC_PREREAD);
    566 
    567 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
    568 			 0, sizeof(struct virtio_blk_req_hdr),
    569 			 true);
    570 	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
    571 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
    572 			 offsetof(struct virtio_blk_req, vr_status),
    573 			 sizeof(uint8_t),
    574 			 false);
    575 	virtio_enqueue_commit(vsc, vq, slot, true);
    576 
    577 	for ( ; ; ) {
    578 		int dslot;
    579 
    580 		r = virtio_dequeue(vsc, vq, &dslot, NULL);
    581 		if (r != 0)
    582 			continue;
    583 		if (dslot != slot) {
    584 			ld_virtio_vq_done1(sc, vsc, vq, dslot);
    585 			continue;
    586 		} else
    587 			break;
    588 	}
    589 
    590 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
    591 			0, sizeof(struct virtio_blk_req_hdr),
    592 			BUS_DMASYNC_POSTWRITE);
    593 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
    594 			0, blkcnt*ld->sc_secsize,
    595 			BUS_DMASYNC_POSTWRITE);
    596 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
    597 			offsetof(struct virtio_blk_req, vr_status),
    598 			sizeof(uint8_t),
    599 			BUS_DMASYNC_POSTREAD);
    600 	if (vr->vr_status == VIRTIO_BLK_S_OK)
    601 		r = 0;
    602 	else
    603 		r = EIO;
    604 	virtio_dequeue_commit(vsc, vq, slot);
    605 
    606 	return r;
    607 }
    608 
    609 static int
    610 ld_virtio_detach(device_t self, int flags)
    611 {
    612 	struct ld_virtio_softc *sc = device_private(self);
    613 	struct ld_softc *ld = &sc->sc_ld;
    614 	bus_dma_tag_t dmat = virtio_dmat(sc->sc_virtio);
    615 	int r, i, qsize;
    616 
    617 	qsize = sc->sc_vq.vq_num;
    618 	r = ldbegindetach(ld, flags);
    619 	if (r != 0)
    620 		return r;
    621 	virtio_reset(sc->sc_virtio);
    622 	virtio_free_vq(sc->sc_virtio, &sc->sc_vq);
    623 
    624 	for (i = 0; i < qsize; i++) {
    625 		bus_dmamap_destroy(dmat,
    626 				   sc->sc_reqs[i].vr_cmdsts);
    627 		bus_dmamap_destroy(dmat,
    628 				   sc->sc_reqs[i].vr_payload);
    629 	}
    630 	bus_dmamem_unmap(dmat, sc->sc_reqs,
    631 			 sizeof(struct virtio_blk_req) * qsize);
    632 	bus_dmamem_free(dmat, &sc->sc_reqs_seg, 1);
    633 
    634 	ldenddetach(ld);
    635 
    636 	virtio_child_detach(sc->sc_virtio);
    637 
    638 	return 0;
    639 }
    640 
    641 static int
    642 ld_virtio_flush(struct ld_softc *ld, bool poll)
    643 {
    644 	struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
    645 	struct virtio_softc * const vsc = sc->sc_virtio;
    646 	const uint32_t features = virtio_features(vsc);
    647 	struct virtqueue *vq = &sc->sc_vq;
    648 	struct virtio_blk_req *vr;
    649 	int slot;
    650 	int r;
    651 
    652 	if ((features & VIRTIO_BLK_F_FLUSH) == 0)
    653 		return 0;
    654 
    655 	mutex_enter(&sc->sc_sync_wait_lock);
    656 	while (sc->sc_sync_use != SYNC_FREE) {
    657 		if (poll) {
    658 			mutex_exit(&sc->sc_sync_wait_lock);
    659 			ld_virtio_vq_done(vq);
    660 			mutex_enter(&sc->sc_sync_wait_lock);
    661 			continue;
    662 		}
    663 		cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
    664 	}
    665 	sc->sc_sync_use = SYNC_BUSY;
    666 	mutex_exit(&sc->sc_sync_wait_lock);
    667 
    668 	r = virtio_enqueue_prep(vsc, vq, &slot);
    669 	if (r != 0) {
    670 		return r;
    671 	}
    672 
    673 	vr = &sc->sc_reqs[slot];
    674 	KASSERT(vr->vr_bp == NULL);
    675 
    676 	r = virtio_enqueue_reserve(vsc, vq, slot, VIRTIO_BLK_MIN_SEGMENTS);
    677 	if (r != 0) {
    678 		return r;
    679 	}
    680 
    681 	vr->vr_bp = DUMMY_VR_BP;
    682 	vr->vr_hdr.type = VIRTIO_BLK_T_FLUSH;
    683 	vr->vr_hdr.ioprio = 0;
    684 	vr->vr_hdr.sector = 0;
    685 
    686 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
    687 			0, sizeof(struct virtio_blk_req_hdr),
    688 			BUS_DMASYNC_PREWRITE);
    689 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
    690 			offsetof(struct virtio_blk_req, vr_status),
    691 			sizeof(uint8_t),
    692 			BUS_DMASYNC_PREREAD);
    693 
    694 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
    695 			 0, sizeof(struct virtio_blk_req_hdr),
    696 			 true);
    697 	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
    698 			 offsetof(struct virtio_blk_req, vr_status),
    699 			 sizeof(uint8_t),
    700 			 false);
    701 	virtio_enqueue_commit(vsc, vq, slot, true);
    702 
    703 	mutex_enter(&sc->sc_sync_wait_lock);
    704 	while (sc->sc_sync_use != SYNC_DONE) {
    705 		if (poll) {
    706 			mutex_exit(&sc->sc_sync_wait_lock);
    707 			ld_virtio_vq_done(vq);
    708 			mutex_enter(&sc->sc_sync_wait_lock);
    709 			continue;
    710 		}
    711 		cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
    712 	}
    713 
    714 	if (sc->sc_sync_status == VIRTIO_BLK_S_OK)
    715 		r = 0;
    716 	else
    717 		r = EIO;
    718 
    719 	sc->sc_sync_use = SYNC_FREE;
    720 	cv_signal(&sc->sc_sync_wait);
    721 	mutex_exit(&sc->sc_sync_wait_lock);
    722 
    723 	return r;
    724 }
    725 
    726 static int
    727 ld_virtio_getcache(struct ld_softc *ld, int *bitsp)
    728 {
    729 	struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
    730 	struct virtio_softc * const vsc = sc->sc_virtio;
    731 	const uint32_t features = virtio_features(vsc);
    732 
    733 	*bitsp = DKCACHE_READ;
    734 	if ((features & VIRTIO_BLK_F_CONFIG_WCE) != 0)
    735 		*bitsp |= DKCACHE_WCHANGE;
    736 	if (virtio_read_device_config_1(vsc,
    737 	    VIRTIO_BLK_CONFIG_WRITEBACK) != 0x00)
    738 		*bitsp |= DKCACHE_WRITE;
    739 
    740 	return 0;
    741 }
    742 
    743 static int
    744 ld_virtio_setcache(struct ld_softc *ld, int bits)
    745 {
    746 	struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
    747 	struct virtio_softc * const vsc = sc->sc_virtio;
    748 	const uint8_t wce = (bits & DKCACHE_WRITE) ? 0x01 : 0x00;
    749 
    750 	virtio_write_device_config_1(vsc,
    751 	    VIRTIO_BLK_CONFIG_WRITEBACK, wce);
    752 	if (virtio_read_device_config_1(vsc,
    753 	    VIRTIO_BLK_CONFIG_WRITEBACK) != wce)
    754 		return EIO;
    755 
    756 	return 0;
    757 }
    758 
    759 static int
    760 ld_virtio_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag, bool poll)
    761 {
    762 	int error;
    763 
    764 	switch (cmd) {
    765 	case DIOCCACHESYNC:
    766 		error = ld_virtio_flush(ld, poll);
    767 		break;
    768 
    769 	case DIOCGCACHE:
    770 		error = ld_virtio_getcache(ld, (int *)addr);
    771 		break;
    772 
    773 	case DIOCSCACHE:
    774 		error = ld_virtio_setcache(ld, *(int *)addr);
    775 		break;
    776 
    777 	default:
    778 		error = EPASSTHROUGH;
    779 		break;
    780 	}
    781 
    782 	return error;
    783 }
    784 
    785 MODULE(MODULE_CLASS_DRIVER, ld_virtio, "ld,virtio");
    786 
    787 #ifdef _MODULE
    788 /*
    789  * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd"
    790  * XXX it will be defined in the common-code module
    791  */
    792 #undef  CFDRIVER_DECL
    793 #define CFDRIVER_DECL(name, class, attr)
    794 #include "ioconf.c"
    795 #endif
    796 
/*
 * Module (un)load hook.  In a built-in kernel the autoconf tables are
 * already linked in, so only the _MODULE build registers/unregisters
 * the ioconf-generated cfattach/cfdata here.
 */
static int
ld_virtio_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	/*
	 * We ignore the cfdriver_vec[] that ioconf provides, since
	 * the cfdrivers are attached already.
	 */
	static struct cfdriver * const no_cfdriver_vec[] = { NULL };
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}
    827