ld_virtio.c revision 1.43 1 /* $NetBSD: ld_virtio.c,v 1.43 2025/04/13 02:34:03 rin Exp $ */
2
3 /*
4 * Copyright (c) 2010 Minoura Makoto.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.43 2025/04/13 02:34:03 rin Exp $");
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/buf.h>
35 #include <sys/bufq.h>
36 #include <sys/bus.h>
37 #include <sys/device.h>
38 #include <sys/disk.h>
39 #include <sys/mutex.h>
40 #include <sys/module.h>
41 #include <sys/kmem.h>
42
43 #include <dev/ldvar.h>
44 #include <dev/pci/virtioreg.h>
45 #include <dev/pci/virtiovar.h>
46
47 #include "ioconf.h"
48
49 /*
50 * ld_virtioreg:
51 */
52 /* Configuration registers */
53 #define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */
54 #define VIRTIO_BLK_CONFIG_SIZE_MAX 8 /* 32bit */
55 #define VIRTIO_BLK_CONFIG_SEG_MAX 12 /* 32bit */
56 #define VIRTIO_BLK_CONFIG_GEOMETRY_C 16 /* 16bit */
57 #define VIRTIO_BLK_CONFIG_GEOMETRY_H 18 /* 8bit */
58 #define VIRTIO_BLK_CONFIG_GEOMETRY_S 19 /* 8bit */
59 #define VIRTIO_BLK_CONFIG_BLK_SIZE 20 /* 32bit */
60 #define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP 24 /* 8bit */
61 #define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET 25 /* 8bit */
62 #define VIRTIO_BLK_CONFIG_MIN_IO_SIZE 26 /* 16bit */
63 #define VIRTIO_BLK_CONFIG_OPT_IO_SIZE 28 /* 32bit */
64 #define VIRTIO_BLK_CONFIG_WRITEBACK 32 /* 8bit */
65 #define VIRTIO_BLK_CONFIG_NUM_QUEUES 34 /* 16bit */
66 #define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS 36 /* 32bit */
67 #define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG 40 /* 32bit */
68 #define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT 44 /* 32bit */
69
70 /* Feature bits */
71 #define VIRTIO_BLK_F_BARRIER (1<<0)
72 #define VIRTIO_BLK_F_SIZE_MAX (1<<1)
73 #define VIRTIO_BLK_F_SEG_MAX (1<<2)
74 #define VIRTIO_BLK_F_GEOMETRY (1<<4)
75 #define VIRTIO_BLK_F_RO (1<<5)
76 #define VIRTIO_BLK_F_BLK_SIZE (1<<6)
77 #define VIRTIO_BLK_F_SCSI (1<<7)
78 #define VIRTIO_BLK_F_FLUSH (1<<9)
79 #define VIRTIO_BLK_F_TOPOLOGY (1<<10)
80 #define VIRTIO_BLK_F_CONFIG_WCE (1<<11)
81 #define VIRTIO_BLK_F_MQ (1<<12)
82 #define VIRTIO_BLK_F_DISCARD (1<<13)
83 #define VIRTIO_BLK_F_WRITE_ZEROES (1<<14)
84 #define VIRTIO_BLK_F_LIFETIME (1<<15)
85 #define VIRTIO_BLK_F_SECURE_ERASE (1<<16)
86
87 /*
88 * Each block request uses at least two segments - one for the header
89 * and one for the status.
90 */
91 #define VIRTIO_BLK_CTRL_SEGMENTS 2
92
93 #define VIRTIO_BLK_FLAG_BITS \
94 VIRTIO_COMMON_FLAG_BITS \
95 "b\x10" "SECURE_ERASE\0" \
96 "b\x0f" "LIFETIME\0" \
97 "b\x0e" "WRITE_ZEROES\0" \
98 "b\x0d" "DISCARD\0" \
99 "b\x0c" "MQ\0" \
100 "b\x0b" "CONFIG_WCE\0" \
101 "b\x0a" "TOPOLOGY\0" \
102 "b\x09" "FLUSH\0" \
103 "b\x07" "SCSI\0" \
104 "b\x06" "BLK_SIZE\0" \
105 "b\x05" "RO\0" \
106 "b\x04" "GEOMETRY\0" \
107 "b\x02" "SEG_MAX\0" \
108 "b\x01" "SIZE_MAX\0" \
109 "b\x00" "BARRIER\0"
110
111 /* Command */
112 #define VIRTIO_BLK_T_IN 0
113 #define VIRTIO_BLK_T_OUT 1
114 #define VIRTIO_BLK_T_FLUSH 4
115 #define VIRTIO_BLK_T_GET_ID 8
116 #define VIRTIO_BLK_T_GET_LIFETIME 10
117 #define VIRTIO_BLK_T_DISCARD 11
118 #define VIRTIO_BLK_T_WRITE_ZEROES 13
119 #define VIRTIO_BLK_T_SECURE_ERASE 14
120 #define VIRTIO_BLK_T_BARRIER 0x80000000
121
122 /* Sector */
123 #define VIRTIO_BLK_BSIZE 512
124
125 /* Status */
126 #define VIRTIO_BLK_S_OK 0
127 #define VIRTIO_BLK_S_IOERR 1
128 #define VIRTIO_BLK_S_UNSUPP 2
129
/*
 * Request header structure: fixed-size, device-readable prefix of every
 * block request.  Fields are stored in the byte order negotiated with
 * the device (written via virtio_rw32()/virtio_rw64()).
 */
struct virtio_blk_req_hdr {
	uint32_t type;	/* VIRTIO_BLK_T_* */
	uint32_t ioprio;	/* request priority; this driver always uses 0 */
	uint64_t sector;	/* offset in VIRTIO_BLK_BSIZE (512-byte) units */
} __packed;
/* payload and 1 byte status follows */
137
/*
 * One range descriptor for VIRTIO_BLK_T_DISCARD / VIRTIO_BLK_T_WRITE_ZEROES
 * requests; travels in the payload, not in the request header
 * (virtio v1.2 sect. 5.2.6).
 */
struct virtio_blk_discard_write_zeroes {
	uint64_t sector;	/* first sector, in 512-byte units */
	uint32_t num_sectors;	/* length of the range, in 512-byte units */
	union {
		uint32_t flags;
		struct {
			uint32_t unmap:1;	/* WRITE_ZEROES: also deallocate */
			uint32_t reserved:31;
		};
	};
} __packed;
149
150 /*
151 * ld_virtiovar:
152 */
/*
 * Per-virtqueue-slot request state.  Only the leading vr_hdr and
 * vr_status are device-visible: they are mapped together by vr_cmdsts,
 * which covers offsetof(struct virtio_blk_req, vr_bp) bytes.  Everything
 * from vr_bp on is driver-private bookkeeping.
 */
struct virtio_blk_req {
	struct virtio_blk_req_hdr vr_hdr;	/* device-readable header */
	uint8_t vr_status;	/* device-written VIRTIO_BLK_S_* result */
	struct buf *vr_bp;	/* bp of the I/O, or DUMMY_VR_BP for sync reqs */
#define DUMMY_VR_BP ((void *)1)
	bus_dmamap_t vr_cmdsts;	/* map for vr_hdr + vr_status (always loaded) */
	bus_dmamap_t vr_payload;	/* map for the data, loaded per request */
	void * vr_datap;	/* driver-owned payload buffer (discard), freed on completion */
	size_t vr_datas;	/* size of vr_datap allocation */
};
163
struct ld_virtio_softc {
	struct ld_softc sc_ld;	/* generic ld(4) state */
	device_t sc_dev;

	uint32_t sc_seg_max; /* max number of segs in xfer */
	uint32_t sc_size_max; /* max size of single seg */

	struct virtio_softc *sc_virtio;	/* parent virtio bus */
	struct virtqueue sc_vq;	/* the single request queue */

	struct virtio_blk_req *sc_reqs;	/* one per virtqueue slot */
	bus_dma_segment_t sc_reqs_seg;	/* backing DMA segment for sc_reqs */

	int sc_readonly;	/* nonzero iff VIRTIO_BLK_F_RO negotiated */

	/*
	 * State of the single in-flight synchronous request (FLUSH or
	 * GET_ID); protected by sc_sync_wait_lock, waiters sleep on
	 * sc_sync_wait.
	 */
	enum {
		SYNC_FREE, SYNC_BUSY, SYNC_DONE
	} sc_sync_use;
	kcondvar_t sc_sync_wait;
	kmutex_t sc_sync_wait_lock;
	uint8_t sc_sync_status;	/* VIRTIO_BLK_S_* of the sync request */
	uint8_t *sc_typename;	/* device ID string from GET_ID, or NULL */

	uint32_t sc_max_discard_sectors;	/* device limits for DISCARD */
	uint32_t sc_max_discard_seg;
#if 0
	uint32_t sc_discard_sector_alignment;
#endif
};
193
194 static int ld_virtio_match(device_t, cfdata_t, void *);
195 static void ld_virtio_attach(device_t, device_t, void *);
196 static int ld_virtio_detach(device_t, int);
197
198 CFATTACH_DECL_NEW(ld_virtio, sizeof(struct ld_virtio_softc),
199 ld_virtio_match, ld_virtio_attach, ld_virtio_detach, NULL);
200
201 static int
202 ld_virtio_match(device_t parent, cfdata_t match, void *aux)
203 {
204 struct virtio_attach_args *va = aux;
205
206 if (va->sc_childdevid == VIRTIO_DEVICE_ID_BLOCK)
207 return 1;
208
209 return 0;
210 }
211
212 static int ld_virtio_vq_done(struct virtqueue *);
213 static int ld_virtio_dump(struct ld_softc *, void *, daddr_t, int);
214 static int ld_virtio_start(struct ld_softc *, struct buf *);
215 static int ld_virtio_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);
216 static int ld_virtio_info(struct ld_softc *, bool);
217 static int ld_virtio_discard(struct ld_softc *, struct buf *);
218
/*
 * Allocate the per-slot request structures (one struct virtio_blk_req
 * per virtqueue entry) from a single DMA-safe chunk and set up their
 * DMA maps:
 *
 *  - vr_cmdsts maps the device-visible prefix of each request, i.e.
 *    vr_hdr plus vr_status (hence the offsetof(..., vr_bp) size), and
 *    is loaded once here and kept loaded for the life of the driver;
 *  - vr_payload maps the data transfer itself, sized for the largest
 *    transfer we advertise, and is loaded/unloaded per request.
 *
 * Returns 0 on success, -1 on failure with all partial work undone.
 */
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	r = bus_dmamem_alloc(virtio_dmat(sc->sc_virtio), allocsize, 0, 0,
	    &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory allocation failed, size %d, "
		    "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(virtio_dmat(sc->sc_virtio),
	    &sc->sc_reqs_seg, 1, allocsize,
	    &vaddr, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory map failed, "
		    "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	/* Zero-fill so err_reqs below can tell which maps were created. */
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		/* Header + status fit in one segment of the request itself. */
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
		    offsetof(struct virtio_blk_req, vr_bp),
		    1,
		    offsetof(struct virtio_blk_req, vr_bp),
		    0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW,
		    &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "command dmamap creation failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		/* Load permanently; synced per request in start/done. */
		r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), vr->vr_cmdsts,
		    &vr->vr_hdr,
		    offsetof(struct virtio_blk_req, vr_bp),
		    NULL, BUS_DMA_WAITOK);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "command dmamap load failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
		    /*size*/ld->sc_maxxfer,
		    /*nseg*/sc->sc_seg_max,
		    /*maxsegsz*/sc->sc_size_max,
		    /*boundary*/0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "payload dmamap creation failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		vr->vr_datap = NULL;
		vr->vr_datas = 0;
	}
	return 0;

err_reqs:
	/* Destroy whatever maps were created; zeroed entries are skipped. */
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(virtio_dmat(sc->sc_virtio), sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(virtio_dmat(sc->sc_virtio), &sc->sc_reqs_seg, 1);
err_none:
	return -1;
}
309
/*
 * Attach: negotiate features with the device, read the relevant
 * configuration fields (sector size, segment limits, geometry,
 * topology, discard limits), allocate the request virtqueue and the
 * per-slot request state, then register with the ld(4) framework.
 */
static void
ld_virtio_attach(device_t parent, device_t self, void *aux)
{
	struct ld_virtio_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	struct virtio_softc *vsc = device_private(parent);
	uint64_t features;
	int qsize;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
			      "something wrong...\n", device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	/* Offer the feature bits this driver knows how to handle. */
	virtio_child_attach_start(vsc, self, IPL_BIO,
	    (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
	     VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE |
	     VIRTIO_BLK_F_FLUSH | VIRTIO_BLK_F_TOPOLOGY |
	     VIRTIO_BLK_F_CONFIG_WCE | VIRTIO_BLK_F_DISCARD),
	    VIRTIO_BLK_FLAG_BITS);

	features = virtio_features(vsc);
	if (features == 0)
		goto err;

	if (features & VIRTIO_BLK_F_RO)
		sc->sc_readonly = 1;
	else
		sc->sc_readonly = 0;

	/* Logical sector size; default is the virtio base unit of 512. */
	if (features & VIRTIO_BLK_F_BLK_SIZE) {
		ld->sc_secsize = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_BLK_SIZE);
	} else
		ld->sc_secsize = VIRTIO_BLK_BSIZE;

	if (features & VIRTIO_BLK_F_SEG_MAX) {
		sc->sc_seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (sc->sc_seg_max == 0) {
			aprint_error_dev(sc->sc_dev,
			    "Invalid SEG_MAX %d\n", sc->sc_seg_max);
			goto err;
		}
	} else {
		sc->sc_seg_max = 1;
		aprint_verbose_dev(sc->sc_dev,
		    "Unknown SEG_MAX, assuming %"PRIu32"\n", sc->sc_seg_max);
	}

	/* At least genfs_io assumes size_max*seg_max >= MAXPHYS. */
	if (features & VIRTIO_BLK_F_SIZE_MAX) {
		sc->sc_size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (sc->sc_size_max < MAXPHYS/sc->sc_seg_max) {
			aprint_error_dev(sc->sc_dev,
			    "Too small SIZE_MAX %d minimum is %d\n",
			    sc->sc_size_max, MAXPHYS/sc->sc_seg_max);
			/* Force the minimum rather than failing attach. */
			// goto err;
			sc->sc_size_max = MAXPHYS/sc->sc_seg_max;
		} else if (sc->sc_size_max > MAXPHYS) {
			/* We never transfer more than MAXPHYS per segment. */
			aprint_verbose_dev(sc->sc_dev,
			    "Clip SIZE_MAX from %d to %d\n",
			    sc->sc_size_max, MAXPHYS);
			sc->sc_size_max = MAXPHYS;
		}
	} else {
		sc->sc_size_max = MAXPHYS;
		aprint_verbose_dev(sc->sc_dev,
		    "Unknown SIZE_MAX, assuming %"PRIu32"\n",
		    sc->sc_size_max);
	}

	aprint_normal_dev(sc->sc_dev, "max %"PRIu32" segs"
	    " of max %"PRIu32" bytes\n",
	    sc->sc_seg_max, sc->sc_size_max);

	virtio_init_vq_vqdone(vsc, &sc->sc_vq, 0,
	    ld_virtio_vq_done);

	/* Each request needs data segments plus header and status. */
	if (virtio_alloc_vq(vsc, &sc->sc_vq, sc->sc_size_max,
	    sc->sc_seg_max + VIRTIO_BLK_CTRL_SEGMENTS, "I/O request") != 0)
		goto err;
	qsize = sc->sc_vq.vq_num;

	if (virtio_child_attach_finish(vsc, &sc->sc_vq, 1,
	    NULL, VIRTIO_F_INTR_MSIX) != 0)
		goto err;

	ld->sc_dv = self;
	/* CAPACITY is in 512-byte units; convert to our sector size. */
	ld->sc_secperunit = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY) / (ld->sc_secsize / VIRTIO_BLK_BSIZE);

	/*
	 * Clamp ld->sc_maxxfer to MAXPHYS before ld_virtio_alloc_reqs
	 * allocates DMA maps of at most ld->sc_maxxfer bytes.
	 * ldattach will also clamp to MAXPHYS, but not until after
	 * ld_virtio_alloc_reqs is done, so that doesn't help.
	 */
	ld->sc_maxxfer = MIN(MAXPHYS, sc->sc_size_max * sc->sc_seg_max);

	if (features & VIRTIO_BLK_F_GEOMETRY) {
		ld->sc_ncylinders = virtio_read_device_config_2(vsc,
					VIRTIO_BLK_CONFIG_GEOMETRY_C);
		ld->sc_nheads     = virtio_read_device_config_1(vsc,
					VIRTIO_BLK_CONFIG_GEOMETRY_H);
		ld->sc_nsectors   = virtio_read_device_config_1(vsc,
					VIRTIO_BLK_CONFIG_GEOMETRY_S);
	}
	if (features & VIRTIO_BLK_F_TOPOLOGY) {
		ld->sc_alignedsec = virtio_read_device_config_1(vsc,
		    VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET);
		/* Physical block size = logical size << exponent. */
		ld->sc_physsecsize = ld->sc_secsize <<
		    virtio_read_device_config_1(vsc,
			VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP);
	}
	ld->sc_maxqueuecnt = qsize - 1;	/* reserve slot for dumps, flushes */

	if (ld_virtio_alloc_reqs(sc, qsize) < 0)
		goto err;

	cv_init(&sc->sc_sync_wait, "vblksync");
	mutex_init(&sc->sc_sync_wait_lock, MUTEX_DEFAULT, IPL_BIO);
	sc->sc_sync_use = SYNC_FREE;

	ld->sc_dump = ld_virtio_dump;
	ld->sc_start = ld_virtio_start;
	ld->sc_ioctl = ld_virtio_ioctl;

	/* Prefer the device's own ID string as the type name. */
	if (ld_virtio_info(ld, true) == 0)
		ld->sc_typename = sc->sc_typename;
	else
		ld->sc_typename = __UNCONST("Virtio Block Device");

	if (features & VIRTIO_BLK_F_DISCARD) {
		ld->sc_discard = ld_virtio_discard;
		sc->sc_max_discard_sectors = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS);
		sc->sc_max_discard_seg = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG);
#if 0
		sc->sc_discard_sector_alignment =
		    virtio_read_device_config_4(vsc,
			VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT);
#endif
	}

	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);

	return;

err:
	virtio_child_attach_failed(vsc);
	return;
}
470
471 static int __used
472 ld_virtio_info(struct ld_softc *ld, bool poll)
473 {
474 struct ld_virtio_softc *sc = device_private(ld->sc_dv);
475 struct virtio_softc *vsc = sc->sc_virtio;
476 struct virtqueue *vq = &sc->sc_vq;
477 struct virtio_blk_req *vr;
478 int r;
479 int slot;
480 uint8_t *id_data; /* virtio v1.2 5.2.6 */
481 size_t id_len = 20;
482 bool unload = false;
483
484 if (sc->sc_typename != NULL) {
485 kmem_strfree(sc->sc_typename);
486 sc->sc_typename = NULL;
487 }
488
489 id_data = kmem_alloc(id_len, KM_SLEEP);
490
491 mutex_enter(&sc->sc_sync_wait_lock);
492 while (sc->sc_sync_use != SYNC_FREE) {
493 if (poll) {
494 mutex_exit(&sc->sc_sync_wait_lock);
495 ld_virtio_vq_done(vq);
496 mutex_enter(&sc->sc_sync_wait_lock);
497 continue;
498 }
499 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
500 }
501 sc->sc_sync_use = SYNC_BUSY;
502 mutex_exit(&sc->sc_sync_wait_lock);
503
504 r = virtio_enqueue_prep(vsc, vq, &slot);
505 if (r != 0)
506 goto done;
507
508 vr = &sc->sc_reqs[slot];
509 KASSERT(vr->vr_bp == NULL);
510
511 r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
512 id_data, id_len, NULL,
513 BUS_DMA_READ|BUS_DMA_NOWAIT);
514 if (r != 0) {
515 aprint_error_dev(sc->sc_dev,
516 "payload dmamap failed, error code %d\n", r);
517 virtio_enqueue_abort(vsc, vq, slot);
518 goto done;
519 }
520 unload = true;
521
522 KASSERT(vr->vr_payload->dm_nsegs <= sc->sc_seg_max);
523 r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
524 VIRTIO_BLK_CTRL_SEGMENTS);
525 if (r != 0) {
526 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
527 goto done;
528 }
529
530 vr->vr_bp = DUMMY_VR_BP;
531 vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_GET_ID);
532 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
533 vr->vr_hdr.sector = virtio_rw64(vsc, 0);
534
535 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
536 0, sizeof(struct virtio_blk_req_hdr),
537 BUS_DMASYNC_PREWRITE);
538 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
539 0, id_len,
540 BUS_DMASYNC_PREREAD);
541 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
542 offsetof(struct virtio_blk_req, vr_status),
543 sizeof(uint8_t),
544 BUS_DMASYNC_PREREAD);
545
546 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
547 0, sizeof(struct virtio_blk_req_hdr),
548 true);
549 virtio_enqueue(vsc, vq, slot, vr->vr_payload, false);
550 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
551 offsetof(struct virtio_blk_req, vr_status),
552 sizeof(uint8_t),
553 false);
554 virtio_enqueue_commit(vsc, vq, slot, true);
555
556 done:
557 mutex_enter(&sc->sc_sync_wait_lock);
558 while (sc->sc_sync_use != SYNC_DONE) {
559 if (poll) {
560 mutex_exit(&sc->sc_sync_wait_lock);
561 ld_virtio_vq_done(vq);
562 mutex_enter(&sc->sc_sync_wait_lock);
563 continue;
564 }
565 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
566 }
567
568 if (sc->sc_sync_status == VIRTIO_BLK_S_OK)
569 r = 0;
570 else
571 r = EIO;
572
573 sc->sc_sync_use = SYNC_FREE;
574 cv_broadcast(&sc->sc_sync_wait);
575 mutex_exit(&sc->sc_sync_wait_lock);
576
577 if (unload) {
578 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
579 0, id_len, BUS_DMASYNC_POSTREAD);
580 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
581 }
582
583 if (r == 0)
584 sc->sc_typename = kmem_strndup(id_data, sizeof(id_data), KM_NOSLEEP);
585
586 kmem_free(id_data, id_len);
587
588 return r;
589 }
590
/*
 * ld(4) strategy hook: queue one asynchronous read/write request.
 * Builds a three-part descriptor chain -- header (device-read),
 * payload (direction per B_READ), status byte (device-write) -- and
 * commits it.  Completion is handled in ld_virtio_vq_done1().
 * Returns 0 on success or an errno; on failure nothing was queued.
 */
static int
ld_virtio_start(struct ld_softc *ld, struct buf *bp)
{
	/* splbio */
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	int r;
	int isread = (bp->b_flags & B_READ);
	int slot;

	if (sc->sc_readonly && !isread)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		return r;

	vr = &sc->sc_reqs[slot];
	KASSERT(vr->vr_bp == NULL);

	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
	    bp->b_data, bp->b_bcount, NULL,
	    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
	     |BUS_DMA_NOWAIT));
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "payload dmamap failed, error code %d\n", r);
		virtio_enqueue_abort(vsc, vq, slot);
		return r;
	}

	KASSERT(vr->vr_payload->dm_nsegs <= sc->sc_seg_max);
	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
	    VIRTIO_BLK_CTRL_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
		return r;
	}

	vr->vr_bp = bp;
	vr->vr_hdr.type = virtio_rw32(vsc,
	    isread ? VIRTIO_BLK_T_IN : VIRTIO_BLK_T_OUT);
	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
	/* Convert from ld's sector size to virtio's 512-byte units. */
	vr->vr_hdr.sector = virtio_rw64(vsc,
	    bp->b_rawblkno * sc->sc_ld.sc_secsize /
	    VIRTIO_BLK_BSIZE);

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
	    0, bp->b_bcount,
	    isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
663
/*
 * Finish the request occupying the given virtqueue slot: sync and
 * unload its DMA maps, free any driver-owned payload (vr_datap), and
 * notify the waiter.  Sync requests (vr_bp == DUMMY_VR_BP, i.e. flush
 * or get-id) are completed by flagging SYNC_DONE; regular I/O goes
 * back to ld(4) via lddone() or lddiscardend().
 */
static void
ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc,
		   struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct buf *bp = vr->vr_bp;
	const uint32_t rt = virtio_rw32(vsc, vr->vr_hdr.type);

	vr->vr_bp = NULL;

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);
	if (bp == DUMMY_VR_BP) {
		/* Synchronous request: hand status to the sleeping waiter. */
		mutex_enter(&sc->sc_sync_wait_lock);
		sc->sc_sync_status = vr->vr_status;
		sc->sc_sync_use = SYNC_DONE;
		cv_broadcast(&sc->sc_sync_wait);
		mutex_exit(&sc->sc_sync_wait_lock);
		virtio_dequeue_commit(vsc, vq, slot);
		return;
	}
	switch (rt) {
	case VIRTIO_BLK_T_OUT:
	case VIRTIO_BLK_T_IN:
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
		    0, bp->b_bcount,
		    (bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
					  :BUS_DMASYNC_POSTWRITE);
		break;
	default:
		/* e.g. DISCARD: payload is the driver-allocated vr_datap. */
		if (vr->vr_datap == NULL)
			break;
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
		    0, vr->vr_datas, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);
		break;
	}
	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_error = 0;
		bp->b_resid = 0;
	}

	if (vr->vr_datap != NULL) {
		kmem_free(vr->vr_datap, vr->vr_datas);
		vr->vr_datap = NULL;
		vr->vr_datas = 0;
	}

	virtio_dequeue_commit(vsc, vq, slot);

	switch (rt) {
	case VIRTIO_BLK_T_OUT:
	case VIRTIO_BLK_T_IN:
		lddone(&sc->sc_ld, bp);
		break;
	case VIRTIO_BLK_T_DISCARD:
		lddiscardend(&sc->sc_ld, bp);
		break;
	}
}
733
734 static int
735 ld_virtio_vq_done(struct virtqueue *vq)
736 {
737 struct virtio_softc *vsc = vq->vq_owner;
738 struct ld_virtio_softc *sc = device_private(virtio_child(vsc));
739 int r = 0;
740 int slot;
741
742 again:
743 if (virtio_dequeue(vsc, vq, &slot, NULL))
744 return r;
745 r = 1;
746
747 ld_virtio_vq_done1(sc, vsc, vq, slot);
748 goto again;
749 }
750
/*
 * Synchronous write used for kernel crash dumps.  Interrupts may be
 * unusable, so the virtqueue is polled; completions for other
 * outstanding requests encountered while polling are finished through
 * ld_virtio_vq_done1().  Returns 0 on success, else an errno.
 */
static int
ld_virtio_dump(struct ld_softc *ld, void *data, daddr_t blkno, int blkcnt)
{
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	int slot, r;

	if (sc->sc_readonly)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0) {
		if (r == EAGAIN) { /* no free slot; dequeue first */
			delay(100);
			ld_virtio_vq_done(vq);
			r = virtio_enqueue_prep(vsc, vq, &slot);
			if (r != 0)
				return r;
		}
		return r;
	}
	vr = &sc->sc_reqs[slot];
	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
	    data, blkcnt*ld->sc_secsize, NULL,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (r != 0)
		return r;

	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
	    VIRTIO_BLK_CTRL_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
		return r;
	}

	/* Marker so the slot is recognizably ours; completed inline below. */
	vr->vr_bp = (void*)0xdeadbeef;
	vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_OUT);
	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
	/* Convert from ld's sector size to virtio's 512-byte units. */
	vr->vr_hdr.sector = virtio_rw64(vsc,
	    blkno * ld->sc_secsize /
	    VIRTIO_BLK_BSIZE);

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
	    0, blkcnt*ld->sc_secsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	/* Poll until our own slot comes back; finish others as we go. */
	for ( ; ; ) {
		int dslot;

		r = virtio_dequeue(vsc, vq, &dslot, NULL);
		if (r != 0)
			continue;
		if (dslot != slot) {
			ld_virtio_vq_done1(sc, vsc, vq, dslot);
			continue;
		} else
			break;
	}

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
	    0, blkcnt*ld->sc_secsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);
	if (vr->vr_status == VIRTIO_BLK_S_OK)
		r = 0;
	else
		r = EIO;
	virtio_dequeue_commit(vsc, vq, slot);

	return r;
}
847
/*
 * Detach: quiesce via ldbegindetach(), reset the device, then tear
 * down the virtqueue, the per-slot DMA maps, and the request memory
 * allocated by ld_virtio_alloc_reqs(), in reverse order of setup.
 */
static int
ld_virtio_detach(device_t self, int flags)
{
	struct ld_virtio_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	bus_dma_tag_t dmat = virtio_dmat(sc->sc_virtio);
	int r, i, qsize;

	qsize = sc->sc_vq.vq_num;
	r = ldbegindetach(ld, flags);
	if (r != 0)
		return r;
	virtio_reset(sc->sc_virtio);
	virtio_free_vq(sc->sc_virtio, &sc->sc_vq);

	for (i = 0; i < qsize; i++) {
		bus_dmamap_destroy(dmat,
				   sc->sc_reqs[i].vr_cmdsts);
		bus_dmamap_destroy(dmat,
				   sc->sc_reqs[i].vr_payload);
	}
	bus_dmamem_unmap(dmat, sc->sc_reqs,
			 sizeof(struct virtio_blk_req) * qsize);
	bus_dmamem_free(dmat, &sc->sc_reqs_seg, 1);

	ldenddetach(ld);

	/* NULL when the GET_ID probe failed and the static name was used. */
	if (sc->sc_typename != NULL)
		kmem_strfree(sc->sc_typename);

	cv_destroy(&sc->sc_sync_wait);
	mutex_destroy(&sc->sc_sync_wait_lock);

	virtio_child_detach(sc->sc_virtio);

	return 0;
}
885
886 static int
887 ld_virtio_flush(struct ld_softc *ld, bool poll)
888 {
889 struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
890 struct virtio_softc * const vsc = sc->sc_virtio;
891 const uint64_t features = virtio_features(vsc);
892 struct virtqueue *vq = &sc->sc_vq;
893 struct virtio_blk_req *vr;
894 int slot;
895 int r;
896
897 if ((features & VIRTIO_BLK_F_FLUSH) == 0)
898 return 0;
899
900 mutex_enter(&sc->sc_sync_wait_lock);
901 while (sc->sc_sync_use != SYNC_FREE) {
902 if (poll) {
903 mutex_exit(&sc->sc_sync_wait_lock);
904 ld_virtio_vq_done(vq);
905 mutex_enter(&sc->sc_sync_wait_lock);
906 continue;
907 }
908 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
909 }
910 sc->sc_sync_use = SYNC_BUSY;
911 mutex_exit(&sc->sc_sync_wait_lock);
912
913 r = virtio_enqueue_prep(vsc, vq, &slot);
914 if (r != 0) {
915 return r;
916 }
917
918 vr = &sc->sc_reqs[slot];
919 KASSERT(vr->vr_bp == NULL);
920
921 r = virtio_enqueue_reserve(vsc, vq, slot, VIRTIO_BLK_CTRL_SEGMENTS);
922 if (r != 0) {
923 return r;
924 }
925
926 vr->vr_bp = DUMMY_VR_BP;
927 vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_FLUSH);
928 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
929 vr->vr_hdr.sector = virtio_rw64(vsc, 0);
930
931 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
932 0, sizeof(struct virtio_blk_req_hdr),
933 BUS_DMASYNC_PREWRITE);
934 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
935 offsetof(struct virtio_blk_req, vr_status),
936 sizeof(uint8_t),
937 BUS_DMASYNC_PREREAD);
938
939 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
940 0, sizeof(struct virtio_blk_req_hdr),
941 true);
942 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
943 offsetof(struct virtio_blk_req, vr_status),
944 sizeof(uint8_t),
945 false);
946 virtio_enqueue_commit(vsc, vq, slot, true);
947
948 mutex_enter(&sc->sc_sync_wait_lock);
949 while (sc->sc_sync_use != SYNC_DONE) {
950 if (poll) {
951 mutex_exit(&sc->sc_sync_wait_lock);
952 ld_virtio_vq_done(vq);
953 mutex_enter(&sc->sc_sync_wait_lock);
954 continue;
955 }
956 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
957 }
958
959 if (sc->sc_sync_status == VIRTIO_BLK_S_OK)
960 r = 0;
961 else
962 r = EIO;
963
964 sc->sc_sync_use = SYNC_FREE;
965 cv_broadcast(&sc->sc_sync_wait);
966 mutex_exit(&sc->sc_sync_wait_lock);
967
968 return r;
969 }
970
971 static int
972 ld_virtio_getcache(struct ld_softc *ld, int *bitsp)
973 {
974 struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
975 struct virtio_softc * const vsc = sc->sc_virtio;
976 const uint64_t features = virtio_features(vsc);
977
978 *bitsp = DKCACHE_READ;
979 if ((features & VIRTIO_BLK_F_CONFIG_WCE) != 0)
980 *bitsp |= DKCACHE_WCHANGE;
981 if (virtio_read_device_config_1(vsc,
982 VIRTIO_BLK_CONFIG_WRITEBACK) != 0x00)
983 *bitsp |= DKCACHE_WRITE;
984
985 return 0;
986 }
987
988 static int
989 ld_virtio_setcache(struct ld_softc *ld, int bits)
990 {
991 struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
992 struct virtio_softc * const vsc = sc->sc_virtio;
993 const uint8_t wce = (bits & DKCACHE_WRITE) ? 0x01 : 0x00;
994
995 virtio_write_device_config_1(vsc,
996 VIRTIO_BLK_CONFIG_WRITEBACK, wce);
997 if (virtio_read_device_config_1(vsc,
998 VIRTIO_BLK_CONFIG_WRITEBACK) != wce)
999 return EIO;
1000
1001 return 0;
1002 }
1003
1004 static int
1005 ld_virtio_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag, bool poll)
1006 {
1007 int error;
1008
1009 switch (cmd) {
1010 case DIOCCACHESYNC:
1011 error = ld_virtio_flush(ld, poll);
1012 break;
1013
1014 case DIOCGCACHE:
1015 error = ld_virtio_getcache(ld, (int *)addr);
1016 break;
1017
1018 case DIOCSCACHE:
1019 error = ld_virtio_setcache(ld, *(int *)addr);
1020 break;
1021
1022 default:
1023 error = EPASSTHROUGH;
1024 break;
1025 }
1026
1027 return error;
1028 }
1029
/*
 * ld(4) discard (TRIM) hook: queue a single-range DISCARD request.
 * The range descriptor travels in the payload, not in vr_hdr.sector;
 * it is allocated here, recorded in vr_datap, and freed by
 * ld_virtio_vq_done1() on completion, which then calls lddiscardend().
 * Returns 0 on success or an errno.
 */
static int
ld_virtio_discard(struct ld_softc *ld, struct buf *bp)
{
	struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
	struct virtio_softc * const vsc = sc->sc_virtio;
	struct virtqueue * const vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	const uint64_t features = virtio_features(vsc);
	int r;
	int slot;
	uint64_t blkno;
	uint32_t nblks;
	struct virtio_blk_discard_write_zeroes * dwz;

	if ((features & VIRTIO_BLK_F_DISCARD) == 0 ||
	    sc->sc_max_discard_seg < 1)
		return EINVAL;

	if (sc->sc_readonly)
		return EIO;

	/* Convert from ld's sector size to virtio's 512-byte units. */
	blkno = bp->b_rawblkno * sc->sc_ld.sc_secsize / VIRTIO_BLK_BSIZE;
	nblks = bp->b_bcount / VIRTIO_BLK_BSIZE;

	if (nblks > sc->sc_max_discard_sectors)
		return ERANGE;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0) {
		return r;
	}

	vr = &sc->sc_reqs[slot];
	KASSERT(vr->vr_bp == NULL);

	dwz = kmem_alloc(sizeof(*dwz), KM_SLEEP);

	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
	    dwz, sizeof(*dwz), NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev,
		    "discard payload dmamap failed, error code %d\n", r);
		virtio_enqueue_abort(vsc, vq, slot);
		kmem_free(dwz, sizeof(*dwz));
		return r;
	}

	KASSERT(vr->vr_payload->dm_nsegs <= sc->sc_seg_max);
	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
	    VIRTIO_BLK_CTRL_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
		kmem_free(dwz, sizeof(*dwz));
		return r;
	}

	/* The header's sector field is unused for DISCARD. */
	vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_DISCARD);
	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
	vr->vr_hdr.sector = virtio_rw64(vsc, 0);
	vr->vr_bp = bp;

	/* Ownership of dwz passes to the completion path. */
	KASSERT(vr->vr_datap == NULL);
	vr->vr_datap = dwz;
	vr->vr_datas = sizeof(*dwz);

	dwz->sector = virtio_rw64(vsc, blkno);
	dwz->num_sectors = virtio_rw32(vsc, nblks);
	dwz->flags = virtio_rw32(vsc, 0);

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
	    0, vr->vr_datas, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
1121
1122 MODULE(MODULE_CLASS_DRIVER, ld_virtio, "ld,virtio");
1123
1124 static int
1125 ld_virtio_modcmd(modcmd_t cmd, void *opaque)
1126 {
1127 int error = 0;
1128
1129 switch (cmd) {
1130 case MODULE_CMD_INIT:
1131 #ifdef _MODULE
1132 error = config_init_component(cfdriver_ioconf_ld_virtio,
1133 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
1134 #endif
1135 break;
1136 case MODULE_CMD_FINI:
1137 #ifdef _MODULE
1138 error = config_fini_component(cfdriver_ioconf_ld_virtio,
1139 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
1140 #endif
1141 break;
1142 default:
1143 error = ENOTTY;
1144 break;
1145 }
1146
1147 return error;
1148 }
1149