ld_virtio.c revision 1.4.4.2
/*	$NetBSD: ld_virtio.c,v 1.4.4.2 2012/01/25 21:18:15 riz Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.4.4.2 2012/01/25 21:18:15 riz Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/mutex.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ldvar.h>
#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#include <uvm/uvm_extern.h>

/*
 * ld_virtioreg:
 */
/* Configuration registers */
#define VIRTIO_BLK_CONFIG_CAPACITY	0 /* 64bit */
#define VIRTIO_BLK_CONFIG_SIZE_MAX	8 /* 32bit */
#define VIRTIO_BLK_CONFIG_SEG_MAX	12 /* 32bit */
#define VIRTIO_BLK_CONFIG_GEOMETRY_C	16 /* 16bit */
#define VIRTIO_BLK_CONFIG_GEOMETRY_H	18 /* 8bit */
#define VIRTIO_BLK_CONFIG_GEOMETRY_S	19 /* 8bit */
#define VIRTIO_BLK_CONFIG_BLK_SIZE	20 /* 32bit */

/* Feature bits */
#define VIRTIO_BLK_F_BARRIER	(1<<0)
#define VIRTIO_BLK_F_SIZE_MAX	(1<<1)
#define VIRTIO_BLK_F_SEG_MAX	(1<<2)
#define VIRTIO_BLK_F_GEOMETRY	(1<<4)
#define VIRTIO_BLK_F_RO		(1<<5)
#define VIRTIO_BLK_F_BLK_SIZE	(1<<6)
#define VIRTIO_BLK_F_SCSI	(1<<7)
#define VIRTIO_BLK_F_FLUSH	(1<<9)

/* Command */
#define VIRTIO_BLK_T_IN		0
#define VIRTIO_BLK_T_OUT	1
#define VIRTIO_BLK_T_BARRIER	0x80000000

/* Status */
#define VIRTIO_BLK_S_OK		0
#define VIRTIO_BLK_S_IOERR	1

/* Request header structure */
struct virtio_blk_req_hdr {
	uint32_t	type;	/* VIRTIO_BLK_T_* */
	uint32_t	ioprio;
	uint64_t	sector;
} __packed;
/*
 * The data payload (located at byte offset 512 * virtio_blk_req_hdr.sector
 * on the device) and a 1-byte status follow this header in each request.
 */


/*
 * ld_virtiovar:
 */
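/*
 * Per-request bookkeeping: one of these is allocated for each slot in
 * the virtqueue.  vr_hdr and vr_status are the device-visible parts,
 * covered by the vr_cmdsts DMA map; vr_payload maps the data buffer of
 * the struct buf being serviced.
 */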
struct virtio_blk_req {
	struct virtio_blk_req_hdr	vr_hdr;
	uint8_t				vr_status;
	struct buf			*vr_bp;
	bus_dmamap_t			vr_cmdsts;
	bus_dmamap_t			vr_payload;
};

struct ld_virtio_softc {
	struct ld_softc		sc_ld;
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[1];

	struct virtio_blk_req	*sc_reqs;
	bus_dma_segment_t	sc_reqs_segs[1];

	kmutex_t		sc_lock;

	int			sc_readonly;
};

static int	ld_virtio_match(device_t, cfdata_t, void *);
static void	ld_virtio_attach(device_t, device_t, void *);
static int	ld_virtio_detach(device_t, int);

CFATTACH_DECL_NEW(ld_virtio, sizeof(struct ld_virtio_softc),
    ld_virtio_match, ld_virtio_attach, ld_virtio_detach, NULL);

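/*
 * Match any virtio child whose device ID identifies a block device.
 */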
static int
ld_virtio_match(device_t parent, cfdata_t match, void *aux)
{
	struct virtio_softc *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;

	return 0;
}

static int ld_virtio_vq_done(struct virtqueue *);
static int ld_virtio_dump(struct ld_softc *, void *, int, int);
static int ld_virtio_start(struct ld_softc *, struct buf *);

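/*
 * Allocate one struct virtio_blk_req per virtqueue slot from a single
 * DMA-safe memory chunk, and create two DMA maps per request:
 * vr_cmdsts, loaded once here over the header and status byte, and
 * vr_payload, loaded per transfer over the caller's data buffer.
 */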
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
			     &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory allocation failed, size %d, "
				 "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat,
			   &sc->sc_reqs_segs[0], 1, allocsize,
			   &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "DMA memory map failed, "
				 "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      offsetof(struct virtio_blk_req, vr_bp),
				      1,
				      offsetof(struct virtio_blk_req, vr_bp),
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
				    &vr->vr_hdr,
				    offsetof(struct virtio_blk_req, vr_bp),
				    NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "command dmamap load failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
				      ld->sc_maxxfer,
				      (ld->sc_maxxfer / NBPG) + 2,
				      ld->sc_maxxfer,
				      0,
				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
				      &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
					 "payload dmamap creation failed, "
					 "error code %d\n", r);
			goto err_reqs;
		}
	}
	return 0;

err_reqs:
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
					   vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return -1;
}

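/*
 * Attach: negotiate features, read the capacity, block size and
 * geometry from the device configuration space, allocate the virtqueue
 * and the request structures, and register the unit with ld(4).
 */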
static void
ld_virtio_attach(device_t parent, device_t self, void *aux)
{
	struct ld_virtio_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	struct virtio_softc *vsc = device_private(parent);
	uint32_t features;
	int qsize, maxxfersize;

	if (vsc->sc_child != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something wrong...\n",
			      device_xname(parent));
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	vsc->sc_intrhand = virtio_vq_intr;

	features = virtio_negotiate_features(vsc,
					     (VIRTIO_BLK_F_SIZE_MAX |
					      VIRTIO_BLK_F_SEG_MAX |
					      VIRTIO_BLK_F_GEOMETRY |
					      VIRTIO_BLK_F_RO |
					      VIRTIO_BLK_F_BLK_SIZE));
	if (features & VIRTIO_BLK_F_RO)
		sc->sc_readonly = 1;
	else
		sc->sc_readonly = 0;

	ld->sc_secsize = 512;
	if (features & VIRTIO_BLK_F_BLK_SIZE) {
		ld->sc_secsize = virtio_read_device_config_4(vsc,
					VIRTIO_BLK_CONFIG_BLK_SIZE);
	}
	maxxfersize = MAXPHYS;
#if 0	/* At least genfs_io assumes maxxfer == MAXPHYS. */
	if (features & VIRTIO_BLK_F_SEG_MAX) {
		maxxfersize = virtio_read_device_config_4(vsc,
					VIRTIO_BLK_CONFIG_SEG_MAX)
				* ld->sc_secsize;
		if (maxxfersize > MAXPHYS)
			maxxfersize = MAXPHYS;
	}
#endif

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
			    maxxfersize, maxxfersize / NBPG + 2,
			    "I/O request") != 0) {
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = ld_virtio_vq_done;

	ld->sc_dv = self;
	ld->sc_secperunit = virtio_read_device_config_8(vsc,
				VIRTIO_BLK_CONFIG_CAPACITY);
	ld->sc_maxxfer = maxxfersize;
	if (features & VIRTIO_BLK_F_GEOMETRY) {
		ld->sc_ncylinders = virtio_read_device_config_2(vsc,
					VIRTIO_BLK_CONFIG_GEOMETRY_C);
		ld->sc_nheads     = virtio_read_device_config_1(vsc,
					VIRTIO_BLK_CONFIG_GEOMETRY_H);
		ld->sc_nsectors   = virtio_read_device_config_1(vsc,
					VIRTIO_BLK_CONFIG_GEOMETRY_S);
	}
	ld->sc_maxqueuecnt = qsize;

	if (ld_virtio_alloc_reqs(sc, qsize) < 0)
		goto err;

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);

	ld->sc_dump = ld_virtio_dump;
	ld->sc_flush = NULL;
	ld->sc_start = ld_virtio_start;

	ld->sc_flags = LDF_ENABLED;
	ldattach(ld);

	return;

err:
	vsc->sc_child = (void*)1;
	return;
}

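/*
 * Start one transfer.  Each request is enqueued as a three-part
 * descriptor chain: the read-only header, the data payload
 * (device-writable for reads), and the write-only status byte.
 * Note that virtio-blk sectors are always 512 bytes, independent of
 * the negotiated block size, hence the conversion from b_rawblkno.
 */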
static int
ld_virtio_start(struct ld_softc *ld, struct buf *bp)
{
	/* splbio */
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_blk_req *vr;
	int r;
	int isread = (bp->b_flags & B_READ);
	int slot;

	if (sc->sc_readonly && !isread)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		return r;
	vr = &sc->sc_reqs[slot];
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
			    bp->b_data, bp->b_bcount, NULL,
			    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
			     |BUS_DMA_NOWAIT));
	if (r != 0)
		return r;

	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 2);
	if (r != 0) {
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
		return r;
	}

	vr->vr_bp = bp;
	vr->vr_hdr.type = isread?VIRTIO_BLK_T_IN:VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize / 512;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
			0, bp->b_bcount,
			isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			offsetof(struct virtio_blk_req, vr_status),
			sizeof(uint8_t),
			BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 0, sizeof(struct virtio_blk_req_hdr),
			 true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 offsetof(struct virtio_blk_req, vr_status),
			 sizeof(uint8_t),
			 false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}

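/*
 * Complete the request in the given slot: sync the DMA maps, check the
 * status byte written by the device, release the descriptors, and hand
 * the buf back to ld(4) via lddone().
 */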
static void
ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc,
		   struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct buf *bp = vr->vr_bp;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
			0, bp->b_bcount,
			(bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
					      :BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
			BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_error = 0;
		bp->b_resid = 0;
	}

	virtio_dequeue_commit(vsc, vq, slot);

	lddone(&sc->sc_ld, bp);
}

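/*
 * Virtqueue interrupt callback: drain every completed slot.  Returns
 * nonzero if at least one request was completed.
 */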
static int
ld_virtio_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct ld_virtio_softc *sc = device_private(vsc->sc_child);
	int r = 0;
	int slot;

again:
	if (virtio_dequeue(vsc, vq, &slot, NULL))
		return r;
	r = 1;

	ld_virtio_vq_done1(sc, vsc, vq, slot);
	goto again;
}

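/*
 * Polled write path used for crash dumps: enqueue a single write and
 * spin on virtio_dequeue() until our own slot completes, finishing any
 * other outstanding requests encountered along the way.
 */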
static int
ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_blk_req *vr;
	int slot, r;

	if (sc->sc_readonly)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0) {
		if (r == EAGAIN) { /* no free slot; dequeue first */
			delay(100);
			ld_virtio_vq_done(vq);
			r = virtio_enqueue_prep(vsc, vq, &slot);
			if (r != 0)
				return r;
		}
		return r;
	}
	vr = &sc->sc_reqs[slot];
	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
			    data, blkcnt*ld->sc_secsize, NULL,
			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r != 0)
		return r;

	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 2);
	if (r != 0) {
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
		return r;
	}

	vr->vr_bp = (void*)0xdeadbeef;
	vr->vr_hdr.type = VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize / 512;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
			0, blkcnt*ld->sc_secsize,
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			offsetof(struct virtio_blk_req, vr_status),
			sizeof(uint8_t),
			BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 0, sizeof(struct virtio_blk_req_hdr),
			 true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
			 offsetof(struct virtio_blk_req, vr_status),
			 sizeof(uint8_t),
			 false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	for ( ; ; ) {
		int dslot;

		r = virtio_dequeue(vsc, vq, &dslot, NULL);
		if (r != 0)
			continue;
		if (dslot != slot) {
			ld_virtio_vq_done1(sc, vsc, vq, dslot);
			continue;
		} else
			break;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
			0, blkcnt*ld->sc_secsize,
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			offsetof(struct virtio_blk_req, vr_status),
			sizeof(uint8_t),
			BUS_DMASYNC_POSTREAD);
	if (vr->vr_status == VIRTIO_BLK_S_OK)
		r = 0;
	else
		r = EIO;
	virtio_dequeue_commit(vsc, vq, slot);

	return r;
}

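/*
 * Detach: reset the device, free the virtqueue, and release the
 * per-request DMA maps and the DMA memory backing the request array.
 */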
static int
ld_virtio_detach(device_t self, int flags)
{
	struct ld_virtio_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	bus_dma_tag_t dmat = sc->sc_virtio->sc_dmat;
	int r, i, qsize;

	qsize = sc->sc_vq[0].vq_num;
	r = ldbegindetach(ld, flags);
	if (r != 0)
		return r;
	virtio_reset(sc->sc_virtio);
	virtio_free_vq(sc->sc_virtio, &sc->sc_vq[0]);

	for (i = 0; i < qsize; i++) {
		bus_dmamap_destroy(dmat,
				   sc->sc_reqs[i].vr_cmdsts);
		bus_dmamap_destroy(dmat,
				   sc->sc_reqs[i].vr_payload);
	}
	bus_dmamem_unmap(dmat, sc->sc_reqs,
			 sizeof(struct virtio_blk_req) * qsize);
	bus_dmamem_free(dmat, &sc->sc_reqs_segs[0], 1);

	ldenddetach(ld);

	return 0;
}