/*	$NetBSD: virtio.c,v 1.45 2021/01/28 15:43:12 reinoud Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.45 2021/01/28 15:43:12 reinoud Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#define MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */

/* incomplete list */
static const char *virtio_device_name[] = {
	"unknown (0)",			/* 0 */
	"network",			/* 1 */
	"block",			/* 2 */
	"console",			/* 3 */
	"entropy",			/* 4 */
	"memory balloon",		/* 5 */
	"I/O memory",			/* 6 */
	"remote processor messaging",	/* 7 */
	"SCSI",				/* 8 */
	"9P transport",			/* 9 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() can still be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, features can only be negotiated again after a virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr);
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
void
virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
{
	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	sc->sc_ops->neg_features(sc, guest_features);
	if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;
}


/*
 * Device configuration registers readers/writers
 */
#if 0
#define DPRINTFR(n, fmt, val, index, num) \
	printf("\n%s (", n); \
	for (int i = 0; i < num; i++) \
		printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
	printf(") -> "); printf(fmt, val); printf("\n");
#define DPRINTFR2(n, fmt, val_s, val_n) \
	printf("%s ", n); \
	printf("\n stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
#else
#define DPRINTFR(n, fmt, val, index, num)
#define DPRINTFR2(n, fmt, val_s, val_n)
#endif


uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint8_t val;

	val = bus_space_read_1(iot, ioh, index);

	DPRINTFR("read_1", "%02x", val, index, 1);
	return val;
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap16(val);

	DPRINTFR("read_2", "%04x", val, index, 2);
	DPRINTFR2("read_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap32(val);

	DPRINTFR("read_4", "%08x", val, index, 4);
	DPRINTFR2("read_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint64_t val, val_0, val_1, val_l, val_h;

	val_0 = bus_space_read_4(iot, ioh, index);
	val_1 = bus_space_read_4(iot, ioh, index + 4);
	if (BYTE_ORDER != sc->sc_bus_endian) {
		val_l = bswap32(val_1);
		val_h = bswap32(val_0);
	} else {
		val_l = val_0;
		val_h = val_1;
	}

#ifdef AARCH64EB_PROBLEM
	/* XXX see comment at virtio_pci.c */
	if (sc->sc_aarch64eb_bus_problem) {
		val_l = val_1;
		val_h = val_0;
	}
#endif

	val = val_h << 32;
	val |= val_l;

	DPRINTFR("read_8", "%08lx", val, index, 8);
	DPRINTFR2("read_8 low ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	DPRINTFR2("read_8 high ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
	return val;
}

/*
 * In the older virtio spec, device config registers are host endian. On
 * newer versions they are little endian. Some newer devices however
 * explicitly specify their registers to always be little endian. These
 * functions cater for these.
 */
uint16_t
virtio_read_device_config_le_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap16(val);

	DPRINTFR("read_le_2", "%04x", val, index, 2);
	DPRINTFR2("read_le_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}

uint32_t
virtio_read_device_config_le_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap32(val);

	DPRINTFR("read_le_4", "%08x", val, index, 4);
	DPRINTFR2("read_le_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}

void
virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	bus_space_write_1(iot, ioh, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc, int index, uint64_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint64_t val_0, val_1, val_l, val_h;

	val_l = BUS_ADDR_LO32(value);
	val_h = BUS_ADDR_HI32(value);

	if (BYTE_ORDER != sc->sc_bus_endian) {
		val_0 = bswap32(val_h);
		val_1 = bswap32(val_l);
	} else {
		val_0 = val_l;
		val_1 = val_h;
	}

#ifdef AARCH64EB_PROBLEM
	/* XXX see comment at virtio_pci.c */
	if (sc->sc_aarch64eb_bus_problem) {
		val_0 = val_h;
		val_1 = val_l;
	}
#endif

	bus_space_write_4(iot, ioh, index, val_0);
	bus_space_write_4(iot, ioh, index + 4, val_1);
}

/*
 * In the older virtio spec, device config registers are host endian. On
 * newer versions they are little endian. Some newer devices however
 * explicitly specify their registers to always be little endian. These
 * functions cater for these.
 */
void
virtio_write_device_config_le_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_le_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}
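
/*
 * Example (illustrative sketch only): a child driver normally reads its
 * device-specific configuration space through the accessors above right
 * after feature negotiation.  The offsets and variable names below are
 * hypothetical and not part of this API.
 *
 *	struct virtio_softc *vsc = device_private(parent);
 *	uint64_t capacity;
 *	uint32_t blksize;
 *
 *	// hypothetical offsets, e.g. taken from the device's register header
 *	capacity = virtio_read_device_config_8(vsc, 0);
 *	blksize = virtio_read_device_config_4(vsc, 20);
 *
 * Fields that the device specification declares to be always little endian
 * would use the _le_ variants instead, e.g.
 *	virtio_read_device_config_le_4(vsc, offset);
 */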


/*
 * data structures endian helpers
 */
uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
}

uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
}

uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
}
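
/*
 * Example (illustrative sketch only): ring structures are always accessed
 * through these helpers so that both legacy (host endian) and virtio 1.0
 * (little endian) ring layouts work, e.g.
 *
 *	vd->addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr);
 *	vd->len = virtio_rw32(sc, dmamap->dm_segs[0].ds_len);
 *	flags = virtio_rw16(sc, vq->vq_used->flags);
 */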


/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    hdrlen + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    hdrlen + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan each vq, bus_dmamap_sync the vq rings (not the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
bool
virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
{

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();

	return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
}

int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (virtio_vq_is_enqueued(sc, vq) == 1) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

int
virtio_vq_intrhand(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		r |= (vq->vq_intrhand)(vq->vq_intrhand_arg);
	}

	return r;
}


/*
 * Increase the event index in order to delay interrupts.
 */
int
virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
    uint16_t nslots)
{
	uint16_t idx, nused;

	idx = vq->vq_used_idx + nslots;

	/* set the new event index: avail_ring->used_event = idx */
	*vq->vq_used_event = virtio_rw16(sc, idx);
	membar_producer();

	vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	nused = (uint16_t)
	    (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
	KASSERT(nused <= vq->vq_num);

	return nslots < nused;
}

/*
 * Postpone interrupt until 3/4 of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Postpone interrupt until all of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Start/stop vq interrupt. No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * No way to disable the interrupt completely with
		 * RingEventIdx. Instead advance used_event by half the
		 * possible value. This won't happen soon and is far enough in
		 * the past to not trigger a spurious interrupt.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
	} else {
		vq->vq_avail->flags |= virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * If event index feature is negotiated, enabling interrupts
		 * is done through setting the latest consumed index in the
		 * used_event field
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
	} else {
		vq->vq_avail->flags &= ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
}
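
/*
 * Example (illustrative sketch only): a typical way for a child driver to
 * moderate completion interrupts.  The surrounding handler is hypothetical.
 *
 *	// in the child's (*vq_done)() callback, after servicing the ring:
 *	if (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX)
 *		virtio_postpone_intr_smart(vsc, vq);
 *
 *	// around a polling phase:
 *	virtio_stop_vq_intr(vsc, vq);
 *	<poll with virtio_vq_is_enqueued()/virtio_dequeue()>;
 *	if (virtio_start_vq_intr(vsc, vq))
 *		<more entries arrived; service them before returning>;
 */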

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = virtio_rw16(sc, j + 1);
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r, hdrlen;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))& \
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}

	hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(hdrlen + vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_used_event = (uint16_t *) ((char *)vq->vq_avail +
	    offsetof(struct vring_avail, ring[vq->vq_num]));
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
	    offsetof(struct vring_used, ring[vq->vq_num]));

	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_SLEEP);
	virtio_init_vq(sc, vq, false);

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}
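
/*
 * Example (illustrative sketch only): allocating and freeing a queue from a
 * child driver.  The queue index, segment limits, handler and softc names
 * below are hypothetical.
 *
 *	// in the child's attach, after virtio_child_attach_start():
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
 *	    MAXPHYS, MAXPHYS / PAGE_SIZE + 2, "I/O request") != 0)
 *		goto err;
 *	sc->sc_vq[0].vq_done = xxx_vq_done;
 *
 *	// in the child's detach, after draining outstanding requests:
 *	virtio_free_vq(vsc, &sc->sc_vq[0]);
 */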

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> copies of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);	// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *					// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		uint64_t addr;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->addr = virtio_rw64(sc, addr);
		vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
		vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
		}
		vd[i].flags = virtio_rw16(sc, 0);
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = virtio_rw16(sc, 0);
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
			vd[s].next = virtio_rw16(sc, qe->qe_index);
			s = qe->qe_index;
		}
		vd[s].flags = virtio_rw16(sc, 0);

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
		vd[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
		if (!write)
			vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
		s = virtio_rw16(sc, vd[s].next);
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
	vd[s].len = virtio_rw32(sc, len);
	if (!write)
		vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
	qe1->qe_next = virtio_rw16(sc, vd[s].next);

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
	    virtio_rw16(sc, slot);

notify:
	if (notifynow) {
		uint16_t o, n, t;
		uint16_t flags;
		o = virtio_rw16(sc, vq->vq_avail->idx);
		n = vq->vq_avail_idx;

		/* publish avail idx */
		membar_producer();
		vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq->vq_queued++;

		membar_consumer();
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
			t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
			if ((uint16_t) (n - t) < (uint16_t) (n - o))
				sc->sc_ops->kick(sc, vq->vq_index);
		} else {
			flags = virtio_rw16(sc, vq->vq_used->flags);
			if (!(flags & VRING_USED_F_NO_NOTIFY))
				sc->sc_ops->kick(sc, vq->vq_index);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
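
/*
 * Example (illustrative sketch only): draining completed requests, e.g.
 * from a child's (*vq_done)() callback.  The payload/command dmamap names
 * are hypothetical.
 *
 *	int slot, len;
 *
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(dmat, dmamap_cmd[slot], ...,
 *		    BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_sync(dmat, dmamap_payload[slot], ...,
 *		    iswrite ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		<hand the completed request ('len' bytes used) upward>;
 *		virtio_dequeue_commit(vsc, vq, slot);
 *	}
 */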

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[1024];

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
	aprint_normal(": features: %s\n", buf);
	aprint_naive("\n");
}

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{

	KASSERT(nvq_pairs == 1 ||
	    (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0);
	if (nvq_pairs > 1)
		sc->sc_child_mq = true;

	sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	sc->sc_finished_called = true;
	r = sc->sc_ops->setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}
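
/*
 * Example (illustrative sketch only): the overall attach protocol a child
 * driver is expected to follow.  The feature mask, flag-bits string and
 * handler names below are hypothetical.
 *
 *	virtio_child_attach_start(vsc, self, IPL_BIO, sc->sc_vq,
 *	    NULL, virtio_vq_intr, VIRTIO_F_INTR_MPSAFE,
 *	    XXX_FEATURES_WANTED, XXX_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, ...) != 0)
 *		goto err;
 *	sc->sc_vq[0].vq_done = xxx_vq_done;
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		goto err;
 *	return;
 * err:
 *	virtio_child_attach_failed(vsc);
 */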

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint64_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_active_features;
}

int
virtio_attach_failed(struct virtio_softc *sc)
{
	device_t self = sc->sc_dev;

	/* no error message if it's not connected, but it has failed */
	if (sc->sc_childdevid == 0)
		return 1;

	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return 1;
	}

	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
		aprint_error_dev(self, "virtio configuration failed\n");
		return 1;
	}

	/* sanity check */
	if (!sc->sc_finished_called) {
		aprint_error_dev(self, "virtio internal error, child driver "
		    "signaled OK but didn't initialize interrupts\n");
		return 1;
	}

	return 0;
}

void
virtio_print_device_type(device_t self, int id, int revision)
{
	aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
	    (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
	    revision);
}


MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}