/*	$NetBSD: ld_virtio.c,v 1.18 2018/06/03 19:47:35 jakllsch Exp $	*/
2
3 /*
4 * Copyright (c) 2010 Minoura Makoto.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.18 2018/06/03 19:47:35 jakllsch Exp $");
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/buf.h>
35 #include <sys/bufq.h>
36 #include <sys/bus.h>
37 #include <sys/device.h>
38 #include <sys/disk.h>
39 #include <sys/mutex.h>
40 #include <sys/module.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45
46 #include <dev/ldvar.h>
47 #include <dev/pci/virtioreg.h>
48 #include <dev/pci/virtiovar.h>
49
50 #include "ioconf.h"
51
52 /*
53 * ld_virtioreg:
54 */
55 /* Configuration registers */
56 #define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */
57 #define VIRTIO_BLK_CONFIG_SIZE_MAX 8 /* 32bit */
58 #define VIRTIO_BLK_CONFIG_SEG_MAX 12 /* 32bit */
59 #define VIRTIO_BLK_CONFIG_GEOMETRY_C 16 /* 16bit */
60 #define VIRTIO_BLK_CONFIG_GEOMETRY_H 18 /* 8bit */
61 #define VIRTIO_BLK_CONFIG_GEOMETRY_S 19 /* 8bit */
62 #define VIRTIO_BLK_CONFIG_BLK_SIZE 20 /* 32bit */
63 #define VIRTIO_BLK_CONFIG_WRITEBACK 32 /* 8bit */
64
65 /* Feature bits */
66 #define VIRTIO_BLK_F_BARRIER (1<<0)
67 #define VIRTIO_BLK_F_SIZE_MAX (1<<1)
68 #define VIRTIO_BLK_F_SEG_MAX (1<<2)
69 #define VIRTIO_BLK_F_GEOMETRY (1<<4)
70 #define VIRTIO_BLK_F_RO (1<<5)
71 #define VIRTIO_BLK_F_BLK_SIZE (1<<6)
72 #define VIRTIO_BLK_F_SCSI (1<<7)
73 #define VIRTIO_BLK_F_FLUSH (1<<9)
74 #define VIRTIO_BLK_F_TOPOLOGY (1<<10)
75 #define VIRTIO_BLK_F_CONFIG_WCE (1<<11)
76
77 /*
78 * Each block request uses at least two segments - one for the header
79 * and one for the status.
80 */
81 #define VIRTIO_BLK_MIN_SEGMENTS 2
82
83 #define VIRTIO_BLK_FLAG_BITS \
84 VIRTIO_COMMON_FLAG_BITS \
85 "\x0c""CONFIG_WCE" \
86 "\x0b""TOPOLOGY" \
87 "\x0a""FLUSH" \
88 "\x08""SCSI" \
89 "\x07""BLK_SIZE" \
90 "\x06""RO" \
91 "\x05""GEOMETRY" \
92 "\x03""SEG_MAX" \
93 "\x02""SIZE_MAX" \
94 "\x01""BARRIER"
95
96 /* Command */
97 #define VIRTIO_BLK_T_IN 0
98 #define VIRTIO_BLK_T_OUT 1
99 #define VIRTIO_BLK_T_FLUSH 4
100 #define VIRTIO_BLK_T_BARRIER 0x80000000
101
102 /* Status */
103 #define VIRTIO_BLK_S_OK 0
104 #define VIRTIO_BLK_S_IOERR 1
105 #define VIRTIO_BLK_S_UNSUPP 2
106
/*
 * Request header structure, laid out exactly as the device expects it
 * at the head of every block request.
 * NOTE(review): fields are device-endian under legacy virtio —
 * this driver writes them in host byte order; confirm against the
 * virtio transport in use.
 */
struct virtio_blk_req_hdr {
	uint32_t	type;	/* VIRTIO_BLK_T_* request kind */
	uint32_t	ioprio;	/* priority hint; this driver always uses 0 */
	uint64_t	sector;	/* start offset in 512-byte units, regardless of sc_secsize */
} __packed;
/* 512*virtio_blk_req_hdr.sector byte payload and 1 byte status follows */
114
115
116 /*
117 * ld_virtiovar:
118 */
struct virtio_blk_req {
	/*
	 * vr_hdr and vr_status are the device-visible parts of the
	 * request; they must stay at the front of the structure because
	 * vr_cmdsts maps the prefix up to offsetof(..., vr_bp).
	 */
	struct virtio_blk_req_hdr	vr_hdr;
	uint8_t				vr_status;	/* VIRTIO_BLK_S_*, written by device */
	struct buf			*vr_bp;		/* in-flight buf; NULL when slot free */
	bus_dmamap_t			vr_cmdsts;	/* map for vr_hdr + vr_status */
	bus_dmamap_t			vr_payload;	/* map for the data buffer */
};
126
struct ld_virtio_softc {
	struct ld_softc		sc_ld;		/* generic ld(4) state; must be first */
	device_t		sc_dev;

	struct virtio_softc	*sc_virtio;	/* parent virtio transport */
	struct virtqueue	sc_vq;		/* single I/O request queue */

	struct virtio_blk_req	*sc_reqs;	/* one request per vq slot */
	bus_dma_segment_t	sc_reqs_seg;	/* backing DMA segment for sc_reqs */

	int			sc_readonly;	/* VIRTIO_BLK_F_RO negotiated */
};
139
140 static int ld_virtio_match(device_t, cfdata_t, void *);
141 static void ld_virtio_attach(device_t, device_t, void *);
142 static int ld_virtio_detach(device_t, int);
143
144 CFATTACH_DECL_NEW(ld_virtio, sizeof(struct ld_virtio_softc),
145 ld_virtio_match, ld_virtio_attach, ld_virtio_detach, NULL);
146
147 static int
148 ld_virtio_match(device_t parent, cfdata_t match, void *aux)
149 {
150 struct virtio_attach_args *va = aux;
151
152 if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
153 return 1;
154
155 return 0;
156 }
157
158 static int ld_virtio_vq_done(struct virtqueue *);
159 static int ld_virtio_dump(struct ld_softc *, void *, int, int);
160 static int ld_virtio_start(struct ld_softc *, struct buf *);
161
/*
 * Allocate DMA-safe backing store for one struct virtio_blk_req per
 * virtqueue slot, and create/load the DMA maps each request needs:
 * vr_cmdsts covers the device-visible prefix (header + status byte),
 * vr_payload will later be loaded with the caller's data buffer.
 * Returns 0 on success, -1 on failure; partial allocations are
 * released before returning.
 */
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	r = bus_dmamem_alloc(virtio_dmat(sc->sc_virtio), allocsize, 0, 0,
	    &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory allocation failed, size %d, "
		    "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(virtio_dmat(sc->sc_virtio),
	    &sc->sc_reqs_seg, 1, allocsize,
	    &vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory map failed, "
		    "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	/* Zero everything so the error path can tell which maps exist. */
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		/*
		 * offsetof(..., vr_bp) is the size of the device-visible
		 * prefix of the request (vr_hdr + vr_status); vr_cmdsts
		 * maps exactly that prefix as a single segment.
		 */
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
		    offsetof(struct virtio_blk_req, vr_bp),
		    1,
		    offsetof(struct virtio_blk_req, vr_bp),
		    0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "command dmamap creation failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		/* Preload the command/status map; it never changes. */
		r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), vr->vr_cmdsts,
		    &vr->vr_hdr,
		    offsetof(struct virtio_blk_req, vr_bp),
		    NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "command dmamap load failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		/*
		 * Payload map: up to sc_maxxfer bytes; allow one segment
		 * per page plus the minimum header/status descriptors.
		 */
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
		    ld->sc_maxxfer,
		    (ld->sc_maxxfer / NBPG) +
		    VIRTIO_BLK_MIN_SEGMENTS,
		    ld->sc_maxxfer,
		    0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "payload dmamap creation failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
	}
	return 0;

err_reqs:
	/* Destroy whatever maps were created; memset above zeroed the rest. */
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(virtio_dmat(sc->sc_virtio), sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(virtio_dmat(sc->sc_virtio), &sc->sc_reqs_seg, 1);
err_none:
	return -1;
}
251
252 static void
253 ld_virtio_attach(device_t parent, device_t self, void *aux)
254 {
255 struct ld_virtio_softc *sc = device_private(self);
256 struct ld_softc *ld = &sc->sc_ld;
257 struct virtio_softc *vsc = device_private(parent);
258 uint32_t features;
259 int qsize, maxxfersize, maxnsegs;
260
261 if (virtio_child(vsc) != NULL) {
262 aprint_normal(": child already attached for %s; "
263 "something wrong...\n", device_xname(parent));
264 return;
265 }
266
267 sc->sc_dev = self;
268 sc->sc_virtio = vsc;
269
270 virtio_child_attach_start(vsc, self, IPL_BIO, &sc->sc_vq,
271 NULL, virtio_vq_intr, 0,
272 (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
273 VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE),
274 VIRTIO_BLK_FLAG_BITS);
275
276 features = virtio_features(vsc);
277
278 if (features & VIRTIO_BLK_F_RO)
279 sc->sc_readonly = 1;
280 else
281 sc->sc_readonly = 0;
282
283 if (features & VIRTIO_BLK_F_BLK_SIZE) {
284 ld->sc_secsize = virtio_read_device_config_4(vsc,
285 VIRTIO_BLK_CONFIG_BLK_SIZE);
286 } else
287 ld->sc_secsize = 512;
288
289 /* At least genfs_io assumes maxxfer == MAXPHYS. */
290 if (features & VIRTIO_BLK_F_SIZE_MAX) {
291 maxxfersize = virtio_read_device_config_4(vsc,
292 VIRTIO_BLK_CONFIG_SIZE_MAX);
293 if (maxxfersize < MAXPHYS) {
294 aprint_error_dev(sc->sc_dev,
295 "Too small SIZE_MAX %dK minimum is %dK\n",
296 maxxfersize / 1024, MAXPHYS / 1024);
297 // goto err;
298 maxxfersize = MAXPHYS;
299 } else if (maxxfersize > MAXPHYS) {
300 aprint_normal_dev(sc->sc_dev,
301 "Clip SEG_MAX from %dK to %dK\n",
302 maxxfersize / 1024,
303 MAXPHYS / 1024);
304 maxxfersize = MAXPHYS;
305 }
306 } else
307 maxxfersize = MAXPHYS;
308
309 if (features & VIRTIO_BLK_F_SEG_MAX) {
310 maxnsegs = virtio_read_device_config_4(vsc,
311 VIRTIO_BLK_CONFIG_SEG_MAX);
312 if (maxnsegs < VIRTIO_BLK_MIN_SEGMENTS) {
313 aprint_error_dev(sc->sc_dev,
314 "Too small SEG_MAX %d minimum is %d\n",
315 maxnsegs, VIRTIO_BLK_MIN_SEGMENTS);
316 maxnsegs = maxxfersize / NBPG;
317 // goto err;
318 }
319 } else
320 maxnsegs = maxxfersize / NBPG;
321
322 /* 2 for the minimum size */
323 maxnsegs += VIRTIO_BLK_MIN_SEGMENTS;
324
325 if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, maxxfersize, maxnsegs,
326 "I/O request") != 0) {
327 goto err;
328 }
329 qsize = sc->sc_vq.vq_num;
330 sc->sc_vq.vq_done = ld_virtio_vq_done;
331
332 if (virtio_child_attach_finish(vsc) != 0)
333 goto err;
334
335 ld->sc_dv = self;
336 ld->sc_secperunit = virtio_read_device_config_8(vsc,
337 VIRTIO_BLK_CONFIG_CAPACITY);
338 ld->sc_maxxfer = maxxfersize;
339 if (features & VIRTIO_BLK_F_GEOMETRY) {
340 ld->sc_ncylinders = virtio_read_device_config_2(vsc,
341 VIRTIO_BLK_CONFIG_GEOMETRY_C);
342 ld->sc_nheads = virtio_read_device_config_1(vsc,
343 VIRTIO_BLK_CONFIG_GEOMETRY_H);
344 ld->sc_nsectors = virtio_read_device_config_1(vsc,
345 VIRTIO_BLK_CONFIG_GEOMETRY_S);
346 }
347 ld->sc_maxqueuecnt = qsize;
348
349 if (ld_virtio_alloc_reqs(sc, qsize) < 0)
350 goto err;
351
352 ld->sc_dump = ld_virtio_dump;
353 ld->sc_start = ld_virtio_start;
354
355 ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
356 ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
357
358 return;
359
360 err:
361 virtio_child_attach_failed(vsc);
362 return;
363 }
364
/*
 * ld(4) strategy hook: translate a buf into a three-part virtio block
 * request (header, payload, status byte) and queue it on the device.
 * Runs at IPL_BIO.  Returns 0 on success or an errno; on success the
 * buf is completed asynchronously via ld_virtio_vq_done1().
 */
static int
ld_virtio_start(struct ld_softc *ld, struct buf *bp)
{
	/* splbio */
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	int r;
	int isread = (bp->b_flags & B_READ);
	int slot;

	if (sc->sc_readonly && !isread)
		return EIO;

	/* Grab a free descriptor slot; its index selects our request. */
	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		return r;

	vr = &sc->sc_reqs[slot];
	KASSERT(vr->vr_bp == NULL);

	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
	    bp->b_data, bp->b_bcount, NULL,
	    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
	     |BUS_DMA_NOWAIT));
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "payload dmamap failed, error code %d\n", r);
		/* Give the slot back so it is not leaked. */
		virtio_enqueue_abort(vsc, vq, slot);
		return r;
	}

	/* Need one descriptor per payload segment plus header + status. */
	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
	    VIRTIO_BLK_MIN_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
		return r;
	}

	vr->vr_bp = bp;
	vr->vr_hdr.type = isread?VIRTIO_BLK_T_IN:VIRTIO_BLK_T_OUT;
	vr->vr_hdr.ioprio = 0;
	/* virtio sectors are always 512 bytes, independent of sc_secsize. */
	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize / 512;

	/* Sync before handing the buffers to the device. */
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
	    0, bp->b_bcount,
	    isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	/*
	 * Descriptor layout: header (device reads), payload (direction
	 * depends on the request), status byte (device writes).
	 */
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
433
434 static void
435 ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc,
436 struct virtqueue *vq, int slot)
437 {
438 struct virtio_blk_req *vr = &sc->sc_reqs[slot];
439 struct buf *bp = vr->vr_bp;
440
441 vr->vr_bp = NULL;
442
443 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
444 0, sizeof(struct virtio_blk_req_hdr),
445 BUS_DMASYNC_POSTWRITE);
446 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
447 0, bp->b_bcount,
448 (bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
449 :BUS_DMASYNC_POSTWRITE);
450 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
451 sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
452 BUS_DMASYNC_POSTREAD);
453
454 if (vr->vr_status != VIRTIO_BLK_S_OK) {
455 bp->b_error = EIO;
456 bp->b_resid = bp->b_bcount;
457 } else {
458 bp->b_error = 0;
459 bp->b_resid = 0;
460 }
461
462 virtio_dequeue_commit(vsc, vq, slot);
463
464 lddone(&sc->sc_ld, bp);
465 }
466
467 static int
468 ld_virtio_vq_done(struct virtqueue *vq)
469 {
470 struct virtio_softc *vsc = vq->vq_owner;
471 struct ld_virtio_softc *sc = device_private(virtio_child(vsc));
472 int r = 0;
473 int slot;
474
475 again:
476 if (virtio_dequeue(vsc, vq, &slot, NULL))
477 return r;
478 r = 1;
479
480 ld_virtio_vq_done1(sc, vsc, vq, slot);
481 goto again;
482 }
483
484 static int
485 ld_virtio_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
486 {
487 struct ld_virtio_softc *sc = device_private(ld->sc_dv);
488 struct virtio_softc *vsc = sc->sc_virtio;
489 struct virtqueue *vq = &sc->sc_vq;
490 struct virtio_blk_req *vr;
491 int slot, r;
492
493 if (sc->sc_readonly)
494 return EIO;
495
496 r = virtio_enqueue_prep(vsc, vq, &slot);
497 if (r != 0) {
498 if (r == EAGAIN) { /* no free slot; dequeue first */
499 delay(100);
500 ld_virtio_vq_done(vq);
501 r = virtio_enqueue_prep(vsc, vq, &slot);
502 if (r != 0)
503 return r;
504 }
505 return r;
506 }
507 vr = &sc->sc_reqs[slot];
508 r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
509 data, blkcnt*ld->sc_secsize, NULL,
510 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
511 if (r != 0)
512 return r;
513
514 r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
515 VIRTIO_BLK_MIN_SEGMENTS);
516 if (r != 0) {
517 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
518 return r;
519 }
520
521 vr->vr_bp = (void*)0xdeadbeef;
522 vr->vr_hdr.type = VIRTIO_BLK_T_OUT;
523 vr->vr_hdr.ioprio = 0;
524 vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize / 512;
525
526 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
527 0, sizeof(struct virtio_blk_req_hdr),
528 BUS_DMASYNC_PREWRITE);
529 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
530 0, blkcnt*ld->sc_secsize,
531 BUS_DMASYNC_PREWRITE);
532 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
533 offsetof(struct virtio_blk_req, vr_status),
534 sizeof(uint8_t),
535 BUS_DMASYNC_PREREAD);
536
537 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
538 0, sizeof(struct virtio_blk_req_hdr),
539 true);
540 virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
541 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
542 offsetof(struct virtio_blk_req, vr_status),
543 sizeof(uint8_t),
544 false);
545 virtio_enqueue_commit(vsc, vq, slot, true);
546
547 for ( ; ; ) {
548 int dslot;
549
550 r = virtio_dequeue(vsc, vq, &dslot, NULL);
551 if (r != 0)
552 continue;
553 if (dslot != slot) {
554 ld_virtio_vq_done1(sc, vsc, vq, dslot);
555 continue;
556 } else
557 break;
558 }
559
560 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
561 0, sizeof(struct virtio_blk_req_hdr),
562 BUS_DMASYNC_POSTWRITE);
563 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
564 0, blkcnt*ld->sc_secsize,
565 BUS_DMASYNC_POSTWRITE);
566 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
567 offsetof(struct virtio_blk_req, vr_status),
568 sizeof(uint8_t),
569 BUS_DMASYNC_POSTREAD);
570 if (vr->vr_status == VIRTIO_BLK_S_OK)
571 r = 0;
572 else
573 r = EIO;
574 virtio_dequeue_commit(vsc, vq, slot);
575
576 return r;
577 }
578
/*
 * Autoconf detach: stop the disk, reset the device, and release the
 * virtqueue, the per-slot DMA maps, and the request backing store in
 * the reverse order of attach.
 */
static int
ld_virtio_detach(device_t self, int flags)
{
	struct ld_virtio_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	bus_dma_tag_t dmat = virtio_dmat(sc->sc_virtio);
	int r, i, qsize;

	qsize = sc->sc_vq.vq_num;
	r = ldbegindetach(ld, flags);
	if (r != 0)
		return r;
	/* Reset first so the device can no longer DMA into our memory. */
	virtio_reset(sc->sc_virtio);
	virtio_free_vq(sc->sc_virtio, &sc->sc_vq);

	for (i = 0; i < qsize; i++) {
		bus_dmamap_destroy(dmat,
				   sc->sc_reqs[i].vr_cmdsts);
		bus_dmamap_destroy(dmat,
				   sc->sc_reqs[i].vr_payload);
	}
	bus_dmamem_unmap(dmat, sc->sc_reqs,
			 sizeof(struct virtio_blk_req) * qsize);
	bus_dmamem_free(dmat, &sc->sc_reqs_seg, 1);

	ldenddetach(ld);

	virtio_child_detach(sc->sc_virtio);

	return 0;
}
610
/* Loadable-module declaration; depends on the ld and virtio modules. */
MODULE(MODULE_CLASS_DRIVER, ld_virtio, "ld,virtio");

#ifdef _MODULE
/*
 * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd"
 * XXX it will be defined in the common-code module
 */
#undef CFDRIVER_DECL
#define CFDRIVER_DECL(name, class, attr)
#include "ioconf.c"
#endif
622
623 static int
624 ld_virtio_modcmd(modcmd_t cmd, void *opaque)
625 {
626 #ifdef _MODULE
627 /*
628 * We ignore the cfdriver_vec[] that ioconf provides, since
629 * the cfdrivers are attached already.
630 */
631 static struct cfdriver * const no_cfdriver_vec[] = { NULL };
632 #endif
633 int error = 0;
634
635 #ifdef _MODULE
636 switch (cmd) {
637 case MODULE_CMD_INIT:
638 error = config_init_component(no_cfdriver_vec,
639 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
640 break;
641 case MODULE_CMD_FINI:
642 error = config_fini_component(no_cfdriver_vec,
643 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
644 break;
645 default:
646 error = ENOTTY;
647 break;
648 }
649 #endif
650
651 return error;
652 }
653