ld_virtio.c revision 1.44 1 /* $NetBSD: ld_virtio.c,v 1.44 2025/07/05 11:41:13 mlelstv Exp $ */
2
3 /*
4 * Copyright (c) 2010 Minoura Makoto.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.44 2025/07/05 11:41:13 mlelstv Exp $");
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/buf.h>
35 #include <sys/bufq.h>
36 #include <sys/bus.h>
37 #include <sys/device.h>
38 #include <sys/disk.h>
39 #include <sys/mutex.h>
40 #include <sys/module.h>
41 #include <sys/kmem.h>
42
43 #include <dev/ldvar.h>
44 #include <dev/pci/virtioreg.h>
45 #include <dev/pci/virtiovar.h>
46
47 #include "ioconf.h"
48
49 /*
50 * ld_virtioreg:
51 */
52 /* Configuration registers */
53 #define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */
54 #define VIRTIO_BLK_CONFIG_SIZE_MAX 8 /* 32bit */
55 #define VIRTIO_BLK_CONFIG_SEG_MAX 12 /* 32bit */
56 #define VIRTIO_BLK_CONFIG_GEOMETRY_C 16 /* 16bit */
57 #define VIRTIO_BLK_CONFIG_GEOMETRY_H 18 /* 8bit */
58 #define VIRTIO_BLK_CONFIG_GEOMETRY_S 19 /* 8bit */
59 #define VIRTIO_BLK_CONFIG_BLK_SIZE 20 /* 32bit */
60 #define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP 24 /* 8bit */
61 #define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET 25 /* 8bit */
62 #define VIRTIO_BLK_CONFIG_MIN_IO_SIZE 26 /* 16bit */
63 #define VIRTIO_BLK_CONFIG_OPT_IO_SIZE 28 /* 32bit */
64 #define VIRTIO_BLK_CONFIG_WRITEBACK 32 /* 8bit */
65 #define VIRTIO_BLK_CONFIG_NUM_QUEUES 34 /* 16bit */
66 #define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS 36 /* 32bit */
67 #define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG 40 /* 32bit */
68 #define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT 44 /* 32bit */
69
70 /* Feature bits */
71 #define VIRTIO_BLK_F_BARRIER (1<<0)
72 #define VIRTIO_BLK_F_SIZE_MAX (1<<1)
73 #define VIRTIO_BLK_F_SEG_MAX (1<<2)
74 #define VIRTIO_BLK_F_GEOMETRY (1<<4)
75 #define VIRTIO_BLK_F_RO (1<<5)
76 #define VIRTIO_BLK_F_BLK_SIZE (1<<6)
77 #define VIRTIO_BLK_F_SCSI (1<<7)
78 #define VIRTIO_BLK_F_FLUSH (1<<9)
79 #define VIRTIO_BLK_F_TOPOLOGY (1<<10)
80 #define VIRTIO_BLK_F_CONFIG_WCE (1<<11)
81 #define VIRTIO_BLK_F_MQ (1<<12)
82 #define VIRTIO_BLK_F_DISCARD (1<<13)
83 #define VIRTIO_BLK_F_WRITE_ZEROES (1<<14)
84 #define VIRTIO_BLK_F_LIFETIME (1<<15)
85 #define VIRTIO_BLK_F_SECURE_ERASE (1<<16)
86
87 /*
88 * Each block request uses at least two segments - one for the header
89 * and one for the status.
90 */
91 #define VIRTIO_BLK_CTRL_SEGMENTS 2
92
93 #define VIRTIO_BLK_FLAG_BITS \
94 VIRTIO_COMMON_FLAG_BITS \
95 "b\x10" "SECURE_ERASE\0" \
96 "b\x0f" "LIFETIME\0" \
97 "b\x0e" "WRITE_ZEROES\0" \
98 "b\x0d" "DISCARD\0" \
99 "b\x0c" "MQ\0" \
100 "b\x0b" "CONFIG_WCE\0" \
101 "b\x0a" "TOPOLOGY\0" \
102 "b\x09" "FLUSH\0" \
103 "b\x07" "SCSI\0" \
104 "b\x06" "BLK_SIZE\0" \
105 "b\x05" "RO\0" \
106 "b\x04" "GEOMETRY\0" \
107 "b\x02" "SEG_MAX\0" \
108 "b\x01" "SIZE_MAX\0" \
109 "b\x00" "BARRIER\0"
110
111 /* Command */
112 #define VIRTIO_BLK_T_IN 0
113 #define VIRTIO_BLK_T_OUT 1
114 #define VIRTIO_BLK_T_FLUSH 4
115 #define VIRTIO_BLK_T_GET_ID 8
116 #define VIRTIO_BLK_T_GET_LIFETIME 10
117 #define VIRTIO_BLK_T_DISCARD 11
118 #define VIRTIO_BLK_T_WRITE_ZEROES 13
119 #define VIRTIO_BLK_T_SECURE_ERASE 14
120 #define VIRTIO_BLK_T_BARRIER 0x80000000
121
122 /* Sector */
123 #define VIRTIO_BLK_BSIZE 512
124
125 /* Status */
126 #define VIRTIO_BLK_S_OK 0
127 #define VIRTIO_BLK_S_IOERR 1
128 #define VIRTIO_BLK_S_UNSUPP 2
129
/*
 * Request header structure: precedes the data payload and the 1-byte
 * status of every request placed on the virtqueue.  Fields are in
 * virtio byte order; call sites convert with virtio_rw32/virtio_rw64.
 */
struct virtio_blk_req_hdr {
	uint32_t type;		/* VIRTIO_BLK_T_* */
	uint32_t ioprio;
	uint64_t sector;	/* in 512-byte (VIRTIO_BLK_BSIZE) units */
} __packed;
/* payload and 1 byte status follows */
137
/*
 * Payload element for VIRTIO_BLK_T_DISCARD (and write-zeroes-style)
 * requests.  sector/num_sectors are in 512-byte units; the driver
 * fills them from b_rawblkno/b_bcount scaled by VIRTIO_BLK_BSIZE
 * (see ld_virtio_discard, which currently writes flags as 0).
 */
struct virtio_blk_discard_write_zeroes {
	uint64_t sector;	/* first sector of the range */
	uint32_t num_sectors;	/* length of the range in sectors */
	union {
		uint32_t flags;	/* raw flags word */
		struct {
			uint32_t unmap:1;
			uint32_t reserved:31;
		};
	};
} __packed;
149
/*
 * ld_virtiovar:
 */
/*
 * Per-slot request state; one per virtqueue entry (sc_reqs[slot]).
 * The leading vr_hdr + vr_status portion is DMA-visible through
 * vr_cmdsts, which ld_virtio_alloc_reqs loads over the first
 * offsetof(struct virtio_blk_req, vr_bp) bytes of this structure;
 * the remaining members are driver-private.
 */
struct virtio_blk_req {
	struct virtio_blk_req_hdr vr_hdr;	/* device-readable header */
	uint8_t vr_status;		/* device-written VIRTIO_BLK_S_* */
	struct buf *vr_bp;		/* buf of async I/O; DUMMY_VR_BP for
					 * internal sync requests */
#define DUMMY_VR_BP ((void *)1)
	bus_dmamap_t vr_cmdsts;		/* map for vr_hdr + vr_status */
	bus_dmamap_t vr_payload;	/* map for the data transfer */
	void * vr_datap;		/* driver-allocated payload (discard);
					 * freed in ld_virtio_vq_done1 */
	size_t vr_datas;		/* size of vr_datap allocation */
};
163
164 struct ld_virtio_softc {
165 struct ld_softc sc_ld;
166 device_t sc_dev;
167
168 uint32_t sc_seg_max; /* max number of segs in xfer */
169 uint32_t sc_size_max; /* max size of single seg */
170
171 struct virtio_softc *sc_virtio;
172 struct virtqueue sc_vq;
173
174 struct virtio_blk_req *sc_reqs;
175 bus_dma_segment_t sc_reqs_seg;
176
177 int sc_readonly;
178
179 enum {
180 SYNC_FREE, SYNC_BUSY, SYNC_DONE
181 } sc_sync_use;
182 kcondvar_t sc_sync_wait;
183 kmutex_t sc_sync_wait_lock;
184 uint8_t sc_sync_status;
185 uint8_t *sc_typename;
186
187 uint32_t sc_max_discard_sectors;
188 uint32_t sc_max_discard_seg;
189 #if 0
190 uint32_t sc_discard_sector_alignment;
191 #endif
192 };
193
194 static int ld_virtio_match(device_t, cfdata_t, void *);
195 static void ld_virtio_attach(device_t, device_t, void *);
196 static int ld_virtio_detach(device_t, int);
197
198 CFATTACH_DECL_NEW(ld_virtio, sizeof(struct ld_virtio_softc),
199 ld_virtio_match, ld_virtio_attach, ld_virtio_detach, NULL);
200
201 static int
202 ld_virtio_match(device_t parent, cfdata_t match, void *aux)
203 {
204 struct virtio_attach_args *va = aux;
205
206 if (va->sc_childdevid == VIRTIO_DEVICE_ID_BLOCK)
207 return 1;
208
209 return 0;
210 }
211
212 static int ld_virtio_vq_done(struct virtqueue *);
213 static int ld_virtio_dump(struct ld_softc *, void *, daddr_t, int);
214 static int ld_virtio_start(struct ld_softc *, struct buf *);
215 static int ld_virtio_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);
216 static int ld_virtio_info(struct ld_softc *, bool);
217 static int ld_virtio_discard(struct ld_softc *, struct buf *);
218
/*
 * Allocate and map the array of per-slot request structures (one per
 * virtqueue entry) in a single piece of DMA-safe memory, and create
 * the two DMA maps each slot needs: vr_cmdsts (loaded immediately
 * over the header+status prefix of the slot) and vr_payload (created
 * here, loaded per-transfer later).
 *
 * Returns 0 on success, -1 on failure; all partial allocations are
 * rolled back before returning.
 */
static int
ld_virtio_alloc_reqs(struct ld_virtio_softc *sc, int qsize)
{
	int allocsize, r, rsegs, i;
	struct ld_softc *ld = &sc->sc_ld;
	void *vaddr;

	allocsize = sizeof(struct virtio_blk_req) * qsize;
	r = bus_dmamem_alloc(virtio_dmat(sc->sc_virtio), allocsize, 0, 0,
	    &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory allocation failed, size %d, "
		    "error code %d\n", allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(virtio_dmat(sc->sc_virtio),
	    &sc->sc_reqs_seg, 1, allocsize,
	    &vaddr, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "DMA memory map failed, "
		    "error code %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	/* Zero so err_reqs below can tell which maps were created. */
	memset(vaddr, 0, allocsize);
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		/*
		 * vr_cmdsts covers only the DMA-visible prefix of the
		 * slot (vr_hdr + vr_status), i.e. everything before
		 * the driver-private vr_bp member.
		 */
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
		    offsetof(struct virtio_blk_req, vr_bp),
		    1,
		    offsetof(struct virtio_blk_req, vr_bp),
		    0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW,
		    &vr->vr_cmdsts);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "command dmamap creation failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		/* Load it permanently over the header in this slot. */
		r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), vr->vr_cmdsts,
		    &vr->vr_hdr,
		    offsetof(struct virtio_blk_req, vr_bp),
		    NULL, BUS_DMA_WAITOK);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "command dmamap load failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		/*
		 * Payload map sized to the negotiated transfer limits;
		 * loaded/unloaded around each individual request.
		 */
		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
		    /*size*/ld->sc_maxxfer,
		    /*nseg*/sc->sc_seg_max,
		    /*maxsegsz*/sc->sc_size_max,
		    /*boundary*/0,
		    BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			aprint_error_dev(sc->sc_dev,
			    "payload dmamap creation failed, "
			    "error code %d\n", r);
			goto err_reqs;
		}
		vr->vr_datap = NULL;
		vr->vr_datas = 0;
	}
	return 0;

err_reqs:
	/* Destroy whatever maps were created (NULL-checked via memset). */
	for (i = 0; i < qsize; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(virtio_dmat(sc->sc_virtio), sc->sc_reqs, allocsize);
err_dmamem_alloc:
	bus_dmamem_free(virtio_dmat(sc->sc_virtio), &sc->sc_reqs_seg, 1);
err_none:
	return -1;
}
309
/*
 * Attach: negotiate features, read the device configuration, set up
 * the single virtqueue and request slots, then hand the disk to the
 * ld(4) midlayer via ldattach().
 */
static void
ld_virtio_attach(device_t parent, device_t self, void *aux)
{
	struct ld_virtio_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	struct virtio_softc *vsc = device_private(parent);
	uint64_t features;
	int qsize;

	if (virtio_child(vsc) != NULL) {
		aprint_normal(": child already attached for %s; "
		    "something wrong...\n", device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	/* Offer the feature bits this driver knows how to use. */
	virtio_child_attach_start(vsc, self, IPL_BIO,
	    (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
	     VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE |
	     VIRTIO_BLK_F_FLUSH | VIRTIO_BLK_F_TOPOLOGY |
	     VIRTIO_BLK_F_CONFIG_WCE | VIRTIO_BLK_F_DISCARD),
	    VIRTIO_BLK_FLAG_BITS);

	features = virtio_features(vsc);
	if (features == 0)
		goto err;

	if (features & VIRTIO_BLK_F_RO)
		sc->sc_readonly = 1;
	else
		sc->sc_readonly = 0;

	/* Logical sector size; defaults to 512 when not advertised. */
	if (features & VIRTIO_BLK_F_BLK_SIZE) {
		ld->sc_secsize = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_BLK_SIZE);
	} else
		ld->sc_secsize = VIRTIO_BLK_BSIZE;

	if (features & VIRTIO_BLK_F_SEG_MAX) {
		sc->sc_seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (sc->sc_seg_max == 0) {
			aprint_error_dev(sc->sc_dev,
			    "Invalid SEG_MAX %d\n", sc->sc_seg_max);
			goto err;
		}
	} else {
		sc->sc_seg_max = 1;
		aprint_verbose_dev(sc->sc_dev,
		    "Unknown SEG_MAX, assuming %"PRIu32"\n", sc->sc_seg_max);
	}

	/* At least genfs_io assumes size_max*seg_max >= MAXPHYS. */
	if (features & VIRTIO_BLK_F_SIZE_MAX) {
		sc->sc_size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (sc->sc_size_max < MAXPHYS/sc->sc_seg_max) {
			aprint_error_dev(sc->sc_dev,
			    "Too small SIZE_MAX %d minimum is %d\n",
			    sc->sc_size_max, MAXPHYS/sc->sc_seg_max);
			/* Proceed anyway with the clamped minimum. */
			// goto err;
			sc->sc_size_max = MAXPHYS/sc->sc_seg_max;
		} else if (sc->sc_size_max > MAXPHYS) {
			aprint_verbose_dev(sc->sc_dev,
			    "Clip SIZE_MAX from %d to %d\n",
			    sc->sc_size_max, MAXPHYS);
			sc->sc_size_max = MAXPHYS;
		}
	} else {
		sc->sc_size_max = MAXPHYS;
		aprint_verbose_dev(sc->sc_dev,
		    "Unknown SIZE_MAX, assuming %"PRIu32"\n",
		    sc->sc_size_max);
	}

	aprint_normal_dev(sc->sc_dev, "max %"PRIu32" segs"
	    " of max %"PRIu32" bytes\n",
	    sc->sc_seg_max, sc->sc_size_max);

	virtio_init_vq_vqdone(vsc, &sc->sc_vq, 0,
	    ld_virtio_vq_done);

	/* Room for data segments plus the header/status control pair. */
	if (virtio_alloc_vq(vsc, &sc->sc_vq, sc->sc_size_max,
	    sc->sc_seg_max + VIRTIO_BLK_CTRL_SEGMENTS, "I/O request") != 0)
		goto err;
	qsize = sc->sc_vq.vq_num;

	if (virtio_child_attach_finish(vsc, &sc->sc_vq, 1,
	    NULL, VIRTIO_F_INTR_MSIX) != 0)
		goto err;

	ld->sc_dv = self;
	/* Capacity is reported in 512-byte units; scale to sc_secsize. */
	ld->sc_secperunit = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY) / (ld->sc_secsize / VIRTIO_BLK_BSIZE);

	/*
	 * Clamp ld->sc_maxxfer to MAXPHYS before ld_virtio_alloc_reqs
	 * allocates DMA maps of at most ld->sc_maxxfer bytes.
	 * ldattach will also clamp to MAXPHYS, but not until after
	 * ld_virtio_alloc_reqs is done, so that doesn't help.
	 */
	ld->sc_maxxfer = MIN(MAXPHYS, sc->sc_size_max * sc->sc_seg_max);

	if (features & VIRTIO_BLK_F_GEOMETRY) {
		ld->sc_ncylinders = virtio_read_device_config_2(vsc,
		    VIRTIO_BLK_CONFIG_GEOMETRY_C);
		ld->sc_nheads = virtio_read_device_config_1(vsc,
		    VIRTIO_BLK_CONFIG_GEOMETRY_H);
		ld->sc_nsectors = virtio_read_device_config_1(vsc,
		    VIRTIO_BLK_CONFIG_GEOMETRY_S);
	}
	if (features & VIRTIO_BLK_F_TOPOLOGY) {
		ld->sc_alignedsec = virtio_read_device_config_1(vsc,
		    VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET);
		/* Physical sector size = logical size << block exponent. */
		ld->sc_physsecsize = ld->sc_secsize <<
		    virtio_read_device_config_1(vsc,
		    VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP);
	}
	ld->sc_maxqueuecnt = qsize - 1;	/* reserve slot for dumps, flushes */

	if (ld_virtio_alloc_reqs(sc, qsize) < 0)
		goto err;

	cv_init(&sc->sc_sync_wait, "vblksync");
	mutex_init(&sc->sc_sync_wait_lock, MUTEX_DEFAULT, IPL_BIO);
	sc->sc_sync_use = SYNC_FREE;

	ld->sc_dump = ld_virtio_dump;
	ld->sc_start = ld_virtio_start;
	ld->sc_ioctl = ld_virtio_ioctl;

	/* Try to fetch the device ID string (polled; interrupts not
	 * usable yet); fall back to a generic name on failure. */
	if (ld_virtio_info(ld, true) == 0)
		ld->sc_typename = sc->sc_typename;
	else
		ld->sc_typename = __UNCONST("Virtio Block Device");

	if (features & VIRTIO_BLK_F_DISCARD) {
		ld->sc_discard = ld_virtio_discard;
		sc->sc_max_discard_sectors = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS);
		sc->sc_max_discard_seg = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG);
#if 0
		sc->sc_discard_sector_alignment =
		    virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT);
#endif
	}

	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);

	return;

err:
	virtio_child_attach_failed(vsc);
	return;
}
470
471 static int __used
472 ld_virtio_info(struct ld_softc *ld, bool poll)
473 {
474 struct ld_virtio_softc *sc = device_private(ld->sc_dv);
475 struct virtio_softc *vsc = sc->sc_virtio;
476 struct virtqueue *vq = &sc->sc_vq;
477 struct virtio_blk_req *vr;
478 int r;
479 int slot;
480 uint8_t *id_data; /* virtio v1.2 5.2.6 */
481 size_t id_len = 20;
482 bool unload = false;
483
484 if (sc->sc_typename != NULL) {
485 kmem_strfree(sc->sc_typename);
486 sc->sc_typename = NULL;
487 }
488
489 id_data = kmem_alloc(id_len, KM_SLEEP);
490
491 mutex_enter(&sc->sc_sync_wait_lock);
492 while (sc->sc_sync_use != SYNC_FREE) {
493 if (poll) {
494 mutex_exit(&sc->sc_sync_wait_lock);
495 ld_virtio_vq_done(vq);
496 mutex_enter(&sc->sc_sync_wait_lock);
497 continue;
498 }
499 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
500 }
501 sc->sc_sync_use = SYNC_BUSY;
502 mutex_exit(&sc->sc_sync_wait_lock);
503
504 r = virtio_enqueue_prep(vsc, vq, &slot);
505 if (r != 0)
506 goto done;
507
508 vr = &sc->sc_reqs[slot];
509 KASSERT(vr->vr_bp == NULL);
510
511 r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
512 id_data, id_len, NULL,
513 BUS_DMA_READ|BUS_DMA_NOWAIT);
514 if (r != 0) {
515 aprint_error_dev(sc->sc_dev,
516 "payload dmamap failed, error code %d\n", r);
517 virtio_enqueue_abort(vsc, vq, slot);
518 goto done;
519 }
520 unload = true;
521
522 KASSERT(vr->vr_payload->dm_nsegs <= sc->sc_seg_max);
523 r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
524 VIRTIO_BLK_CTRL_SEGMENTS);
525 if (r != 0) {
526 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
527 goto done;
528 }
529
530 vr->vr_bp = DUMMY_VR_BP;
531 vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_GET_ID);
532 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
533 vr->vr_hdr.sector = virtio_rw64(vsc, 0);
534
535 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
536 0, sizeof(struct virtio_blk_req_hdr),
537 BUS_DMASYNC_PREWRITE);
538 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
539 0, id_len,
540 BUS_DMASYNC_PREREAD);
541 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
542 offsetof(struct virtio_blk_req, vr_status),
543 sizeof(uint8_t),
544 BUS_DMASYNC_PREREAD);
545
546 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
547 0, sizeof(struct virtio_blk_req_hdr),
548 true);
549 virtio_enqueue(vsc, vq, slot, vr->vr_payload, false);
550 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
551 offsetof(struct virtio_blk_req, vr_status),
552 sizeof(uint8_t),
553 false);
554 virtio_enqueue_commit(vsc, vq, slot, true);
555
556 done:
557 mutex_enter(&sc->sc_sync_wait_lock);
558 while (sc->sc_sync_use != SYNC_DONE) {
559 if (poll) {
560 mutex_exit(&sc->sc_sync_wait_lock);
561 ld_virtio_vq_done(vq);
562 mutex_enter(&sc->sc_sync_wait_lock);
563 continue;
564 }
565 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
566 }
567
568 if (sc->sc_sync_status == VIRTIO_BLK_S_OK)
569 r = 0;
570 else
571 r = EIO;
572
573 sc->sc_sync_use = SYNC_FREE;
574 cv_broadcast(&sc->sc_sync_wait);
575 mutex_exit(&sc->sc_sync_wait_lock);
576
577 if (unload) {
578 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
579 0, id_len, BUS_DMASYNC_POSTREAD);
580 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
581 }
582
583 if (r == 0) {
584 if (id_data[0] == '\0')
585 r = ENOENT;
586 else
587 sc->sc_typename = kmem_strndup(id_data, sizeof(id_data), KM_NOSLEEP);
588 }
589
590 kmem_free(id_data, id_len);
591
592 return r;
593 }
594
/*
 * ld(4) start hook: enqueue one read/write buf on the virtqueue.
 * Runs at IPL_BIO ("splbio").  Returns 0 once the request is handed
 * to the device (completion arrives via ld_virtio_vq_done), or an
 * errno if the request could not be enqueued.
 */
static int
ld_virtio_start(struct ld_softc *ld, struct buf *bp)
{
	/* splbio */
	struct ld_virtio_softc *sc = device_private(ld->sc_dv);
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	int r;
	int isread = (bp->b_flags & B_READ);
	int slot;

	/* Reject writes outright on a read-only device. */
	if (sc->sc_readonly && !isread)
		return EIO;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0)
		return r;

	vr = &sc->sc_reqs[slot];
	KASSERT(vr->vr_bp == NULL);

	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
	    bp->b_data, bp->b_bcount, NULL,
	    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
	     |BUS_DMA_NOWAIT));
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "payload dmamap failed, error code %d\n", r);
		virtio_enqueue_abort(vsc, vq, slot);
		return r;
	}

	/* Need data segments plus the header/status control pair. */
	KASSERT(vr->vr_payload->dm_nsegs <= sc->sc_seg_max);
	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
	    VIRTIO_BLK_CTRL_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
		return r;
	}

	vr->vr_bp = bp;
	vr->vr_hdr.type = virtio_rw32(vsc,
	    isread ? VIRTIO_BLK_T_IN : VIRTIO_BLK_T_OUT);
	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
	/* Device sector numbers are always in 512-byte units. */
	vr->vr_hdr.sector = virtio_rw64(vsc,
	    bp->b_rawblkno * sc->sc_ld.sc_secsize /
	    VIRTIO_BLK_BSIZE);

	/* Header is device-read; payload direction depends on I/O;
	 * status byte is device-written. */
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
	    0, bp->b_bcount,
	    isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
667
/*
 * Complete the request occupying the given virtqueue slot: sync and
 * unload its DMA maps, propagate the status byte, release the slot,
 * and notify the waiter (sync channel) or the ld midlayer (async I/O).
 */
static void
ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct buf *bp = vr->vr_bp;
	const uint32_t rt = virtio_rw32(vsc, vr->vr_hdr.type);

	vr->vr_bp = NULL;

	/*
	 * Note: vr_hdr is the first member of struct virtio_blk_req
	 * and is __packed, so sizeof(struct virtio_blk_req_hdr) used
	 * as the status offset here equals the
	 * offsetof(struct virtio_blk_req, vr_status) used at enqueue
	 * time.
	 */
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);
	/* Internal (flush/GET_ID) request: wake the synchronous waiter. */
	if (bp == DUMMY_VR_BP) {
		mutex_enter(&sc->sc_sync_wait_lock);
		sc->sc_sync_status = vr->vr_status;
		sc->sc_sync_use = SYNC_DONE;
		cv_broadcast(&sc->sc_sync_wait);
		mutex_exit(&sc->sc_sync_wait_lock);
		virtio_dequeue_commit(vsc, vq, slot);
		return;
	}
	switch (rt) {
	case VIRTIO_BLK_T_OUT:
	case VIRTIO_BLK_T_IN:
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
		    0, bp->b_bcount,
		    (bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
		    :BUS_DMASYNC_POSTWRITE);
		break;
	default:
		/* Driver-allocated payload (e.g. discard's dwz block). */
		if (vr->vr_datap == NULL)
			break;
		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
		    0, vr->vr_datas, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);
		break;
	}
	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_error = 0;
		bp->b_resid = 0;
	}

	/* Free the driver-allocated payload, if any. */
	if (vr->vr_datap != NULL) {
		kmem_free(vr->vr_datap, vr->vr_datas);
		vr->vr_datap = NULL;
		vr->vr_datas = 0;
	}

	virtio_dequeue_commit(vsc, vq, slot);

	switch (rt) {
	case VIRTIO_BLK_T_OUT:
	case VIRTIO_BLK_T_IN:
		lddone(&sc->sc_ld, bp);
		break;
	case VIRTIO_BLK_T_DISCARD:
		lddiscardend(&sc->sc_ld, bp);
		break;
	}
}
737
738 static int
739 ld_virtio_vq_done(struct virtqueue *vq)
740 {
741 struct virtio_softc *vsc = vq->vq_owner;
742 struct ld_virtio_softc *sc = device_private(virtio_child(vsc));
743 int r = 0;
744 int slot;
745
746 again:
747 if (virtio_dequeue(vsc, vq, &slot, NULL))
748 return r;
749 r = 1;
750
751 ld_virtio_vq_done1(sc, vsc, vq, slot);
752 goto again;
753 }
754
755 static int
756 ld_virtio_dump(struct ld_softc *ld, void *data, daddr_t blkno, int blkcnt)
757 {
758 struct ld_virtio_softc *sc = device_private(ld->sc_dv);
759 struct virtio_softc *vsc = sc->sc_virtio;
760 struct virtqueue *vq = &sc->sc_vq;
761 struct virtio_blk_req *vr;
762 int slot, r;
763
764 if (sc->sc_readonly)
765 return EIO;
766
767 r = virtio_enqueue_prep(vsc, vq, &slot);
768 if (r != 0) {
769 if (r == EAGAIN) { /* no free slot; dequeue first */
770 delay(100);
771 ld_virtio_vq_done(vq);
772 r = virtio_enqueue_prep(vsc, vq, &slot);
773 if (r != 0)
774 return r;
775 }
776 return r;
777 }
778 vr = &sc->sc_reqs[slot];
779 r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
780 data, blkcnt*ld->sc_secsize, NULL,
781 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
782 if (r != 0)
783 return r;
784
785 r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
786 VIRTIO_BLK_CTRL_SEGMENTS);
787 if (r != 0) {
788 bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
789 return r;
790 }
791
792 vr->vr_bp = (void*)0xdeadbeef;
793 vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_OUT);
794 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
795 vr->vr_hdr.sector = virtio_rw64(vsc,
796 blkno * ld->sc_secsize /
797 VIRTIO_BLK_BSIZE);
798
799 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
800 0, sizeof(struct virtio_blk_req_hdr),
801 BUS_DMASYNC_PREWRITE);
802 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
803 0, blkcnt*ld->sc_secsize,
804 BUS_DMASYNC_PREWRITE);
805 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
806 offsetof(struct virtio_blk_req, vr_status),
807 sizeof(uint8_t),
808 BUS_DMASYNC_PREREAD);
809
810 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
811 0, sizeof(struct virtio_blk_req_hdr),
812 true);
813 virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
814 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
815 offsetof(struct virtio_blk_req, vr_status),
816 sizeof(uint8_t),
817 false);
818 virtio_enqueue_commit(vsc, vq, slot, true);
819
820 for ( ; ; ) {
821 int dslot;
822
823 r = virtio_dequeue(vsc, vq, &dslot, NULL);
824 if (r != 0)
825 continue;
826 if (dslot != slot) {
827 ld_virtio_vq_done1(sc, vsc, vq, dslot);
828 continue;
829 } else
830 break;
831 }
832
833 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
834 0, sizeof(struct virtio_blk_req_hdr),
835 BUS_DMASYNC_POSTWRITE);
836 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
837 0, blkcnt*ld->sc_secsize,
838 BUS_DMASYNC_POSTWRITE);
839 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
840 offsetof(struct virtio_blk_req, vr_status),
841 sizeof(uint8_t),
842 BUS_DMASYNC_POSTREAD);
843 if (vr->vr_status == VIRTIO_BLK_S_OK)
844 r = 0;
845 else
846 r = EIO;
847 virtio_dequeue_commit(vsc, vq, slot);
848
849 return r;
850 }
851
852 static int
853 ld_virtio_detach(device_t self, int flags)
854 {
855 struct ld_virtio_softc *sc = device_private(self);
856 struct ld_softc *ld = &sc->sc_ld;
857 bus_dma_tag_t dmat = virtio_dmat(sc->sc_virtio);
858 int r, i, qsize;
859
860 qsize = sc->sc_vq.vq_num;
861 r = ldbegindetach(ld, flags);
862 if (r != 0)
863 return r;
864 virtio_reset(sc->sc_virtio);
865 virtio_free_vq(sc->sc_virtio, &sc->sc_vq);
866
867 for (i = 0; i < qsize; i++) {
868 bus_dmamap_destroy(dmat,
869 sc->sc_reqs[i].vr_cmdsts);
870 bus_dmamap_destroy(dmat,
871 sc->sc_reqs[i].vr_payload);
872 }
873 bus_dmamem_unmap(dmat, sc->sc_reqs,
874 sizeof(struct virtio_blk_req) * qsize);
875 bus_dmamem_free(dmat, &sc->sc_reqs_seg, 1);
876
877 ldenddetach(ld);
878
879 if (sc->sc_typename != NULL)
880 kmem_strfree(sc->sc_typename);
881
882 cv_destroy(&sc->sc_sync_wait);
883 mutex_destroy(&sc->sc_sync_wait_lock);
884
885 virtio_child_detach(sc->sc_virtio);
886
887 return 0;
888 }
889
890 static int
891 ld_virtio_flush(struct ld_softc *ld, bool poll)
892 {
893 struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
894 struct virtio_softc * const vsc = sc->sc_virtio;
895 const uint64_t features = virtio_features(vsc);
896 struct virtqueue *vq = &sc->sc_vq;
897 struct virtio_blk_req *vr;
898 int slot;
899 int r;
900
901 if ((features & VIRTIO_BLK_F_FLUSH) == 0)
902 return 0;
903
904 mutex_enter(&sc->sc_sync_wait_lock);
905 while (sc->sc_sync_use != SYNC_FREE) {
906 if (poll) {
907 mutex_exit(&sc->sc_sync_wait_lock);
908 ld_virtio_vq_done(vq);
909 mutex_enter(&sc->sc_sync_wait_lock);
910 continue;
911 }
912 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
913 }
914 sc->sc_sync_use = SYNC_BUSY;
915 mutex_exit(&sc->sc_sync_wait_lock);
916
917 r = virtio_enqueue_prep(vsc, vq, &slot);
918 if (r != 0) {
919 return r;
920 }
921
922 vr = &sc->sc_reqs[slot];
923 KASSERT(vr->vr_bp == NULL);
924
925 r = virtio_enqueue_reserve(vsc, vq, slot, VIRTIO_BLK_CTRL_SEGMENTS);
926 if (r != 0) {
927 return r;
928 }
929
930 vr->vr_bp = DUMMY_VR_BP;
931 vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_FLUSH);
932 vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
933 vr->vr_hdr.sector = virtio_rw64(vsc, 0);
934
935 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
936 0, sizeof(struct virtio_blk_req_hdr),
937 BUS_DMASYNC_PREWRITE);
938 bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
939 offsetof(struct virtio_blk_req, vr_status),
940 sizeof(uint8_t),
941 BUS_DMASYNC_PREREAD);
942
943 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
944 0, sizeof(struct virtio_blk_req_hdr),
945 true);
946 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
947 offsetof(struct virtio_blk_req, vr_status),
948 sizeof(uint8_t),
949 false);
950 virtio_enqueue_commit(vsc, vq, slot, true);
951
952 mutex_enter(&sc->sc_sync_wait_lock);
953 while (sc->sc_sync_use != SYNC_DONE) {
954 if (poll) {
955 mutex_exit(&sc->sc_sync_wait_lock);
956 ld_virtio_vq_done(vq);
957 mutex_enter(&sc->sc_sync_wait_lock);
958 continue;
959 }
960 cv_wait(&sc->sc_sync_wait, &sc->sc_sync_wait_lock);
961 }
962
963 if (sc->sc_sync_status == VIRTIO_BLK_S_OK)
964 r = 0;
965 else
966 r = EIO;
967
968 sc->sc_sync_use = SYNC_FREE;
969 cv_broadcast(&sc->sc_sync_wait);
970 mutex_exit(&sc->sc_sync_wait_lock);
971
972 return r;
973 }
974
975 static int
976 ld_virtio_getcache(struct ld_softc *ld, int *bitsp)
977 {
978 struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
979 struct virtio_softc * const vsc = sc->sc_virtio;
980 const uint64_t features = virtio_features(vsc);
981
982 *bitsp = DKCACHE_READ;
983 if ((features & VIRTIO_BLK_F_CONFIG_WCE) != 0)
984 *bitsp |= DKCACHE_WCHANGE;
985 if (virtio_read_device_config_1(vsc,
986 VIRTIO_BLK_CONFIG_WRITEBACK) != 0x00)
987 *bitsp |= DKCACHE_WRITE;
988
989 return 0;
990 }
991
992 static int
993 ld_virtio_setcache(struct ld_softc *ld, int bits)
994 {
995 struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
996 struct virtio_softc * const vsc = sc->sc_virtio;
997 const uint8_t wce = (bits & DKCACHE_WRITE) ? 0x01 : 0x00;
998
999 virtio_write_device_config_1(vsc,
1000 VIRTIO_BLK_CONFIG_WRITEBACK, wce);
1001 if (virtio_read_device_config_1(vsc,
1002 VIRTIO_BLK_CONFIG_WRITEBACK) != wce)
1003 return EIO;
1004
1005 return 0;
1006 }
1007
1008 static int
1009 ld_virtio_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag, bool poll)
1010 {
1011 int error;
1012
1013 switch (cmd) {
1014 case DIOCCACHESYNC:
1015 error = ld_virtio_flush(ld, poll);
1016 break;
1017
1018 case DIOCGCACHE:
1019 error = ld_virtio_getcache(ld, (int *)addr);
1020 break;
1021
1022 case DIOCSCACHE:
1023 error = ld_virtio_setcache(ld, *(int *)addr);
1024 break;
1025
1026 default:
1027 error = EPASSTHROUGH;
1028 break;
1029 }
1030
1031 return error;
1032 }
1033
/*
 * ld(4) discard hook: issue a VIRTIO_BLK_T_DISCARD for the range
 * described by bp.  The range itself travels in a driver-allocated
 * virtio_blk_discard_write_zeroes payload (the header's sector field
 * is unused and written as 0); ownership of that allocation passes
 * to ld_virtio_vq_done1 via vr_datap/vr_datas, which frees it on
 * completion and then calls lddiscardend().
 */
static int
ld_virtio_discard(struct ld_softc *ld, struct buf *bp)
{
	struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
	struct virtio_softc * const vsc = sc->sc_virtio;
	struct virtqueue * const vq = &sc->sc_vq;
	struct virtio_blk_req *vr;
	const uint64_t features = virtio_features(vsc);
	int r;
	int slot;
	uint64_t blkno;
	uint32_t nblks;
	struct virtio_blk_discard_write_zeroes * dwz;

	if ((features & VIRTIO_BLK_F_DISCARD) == 0 ||
	    sc->sc_max_discard_seg < 1)
		return EINVAL;

	if (sc->sc_readonly)
		return EIO;

	/* Convert the request to 512-byte device sector units. */
	blkno = bp->b_rawblkno * sc->sc_ld.sc_secsize / VIRTIO_BLK_BSIZE;
	nblks = bp->b_bcount / VIRTIO_BLK_BSIZE;

	if (nblks > sc->sc_max_discard_sectors)
		return ERANGE;

	r = virtio_enqueue_prep(vsc, vq, &slot);
	if (r != 0) {
		return r;
	}

	vr = &sc->sc_reqs[slot];
	KASSERT(vr->vr_bp == NULL);

	dwz = kmem_alloc(sizeof(*dwz), KM_SLEEP);

	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
	    dwz, sizeof(*dwz), NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev,
		    "discard payload dmamap failed, error code %d\n", r);
		virtio_enqueue_abort(vsc, vq, slot);
		kmem_free(dwz, sizeof(*dwz));
		return r;
	}

	KASSERT(vr->vr_payload->dm_nsegs <= sc->sc_seg_max);
	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
	    VIRTIO_BLK_CTRL_SEGMENTS);
	if (r != 0) {
		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
		kmem_free(dwz, sizeof(*dwz));
		return r;
	}

	vr->vr_hdr.type = virtio_rw32(vsc, VIRTIO_BLK_T_DISCARD);
	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
	vr->vr_hdr.sector = virtio_rw64(vsc, 0);
	vr->vr_bp = bp;

	/* Hand ownership of dwz to the completion path. */
	KASSERT(vr->vr_datap == NULL);
	vr->vr_datap = dwz;
	vr->vr_datas = sizeof(*dwz);

	dwz->sector = virtio_rw64(vsc, blkno);
	dwz->num_sectors = virtio_rw32(vsc, nblks);
	dwz->flags = virtio_rw32(vsc, 0);

	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
	    0, vr->vr_datas, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    0, sizeof(struct virtio_blk_req_hdr),
	    true);
	virtio_enqueue(vsc, vq, slot, vr->vr_payload, true);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status),
	    sizeof(uint8_t),
	    false);
	virtio_enqueue_commit(vsc, vq, slot, true);

	return 0;
}
1125
1126 MODULE(MODULE_CLASS_DRIVER, ld_virtio, "ld,virtio");
1127
1128 static int
1129 ld_virtio_modcmd(modcmd_t cmd, void *opaque)
1130 {
1131 int error = 0;
1132
1133 switch (cmd) {
1134 case MODULE_CMD_INIT:
1135 #ifdef _MODULE
1136 error = config_init_component(cfdriver_ioconf_ld_virtio,
1137 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
1138 #endif
1139 break;
1140 case MODULE_CMD_FINI:
1141 #ifdef _MODULE
1142 error = config_fini_component(cfdriver_ioconf_ld_virtio,
1143 cfattach_ioconf_ld_virtio, cfdata_ioconf_ld_virtio);
1144 #endif
1145 break;
1146 default:
1147 error = ENOTTY;
1148 break;
1149 }
1150
1151 return error;
1152 }
1153