/*	$NetBSD: virtio.c,v 1.47 2021/02/05 20:45:38 reinoud Exp $	*/
2
3 /*
4 * Copyright (c) 2020 The NetBSD Foundation, Inc.
5 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
6 * Copyright (c) 2010 Minoura Makoto.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.47 2021/02/05 20:45:38 reinoud Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/atomic.h>
37 #include <sys/bus.h>
38 #include <sys/device.h>
39 #include <sys/kmem.h>
40 #include <sys/module.h>
41
42 #define VIRTIO_PRIVATE
43
44 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
45 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
46
47 #define MINSEG_INDIRECT 2 /* use indirect if nsegs >= this value */
48
49 /* incomplete list */
50 static const char *virtio_device_name[] = {
51 "unknown (0)", /* 0 */
52 "network", /* 1 */
53 "block", /* 2 */
54 "console", /* 3 */
55 "entropy", /* 4 */
56 "memory balloon", /* 5 */
57 "I/O memory", /* 6 */
58 "remote processor messaging", /* 7 */
59 "SCSI", /* 8 */
60 "9P transport", /* 9 */
61 };
62 #define NDEVNAMES __arraycount(virtio_device_name)
63
64 static void virtio_init_vq(struct virtio_softc *,
65 struct virtqueue *, const bool);
66
67 void
68 virtio_set_status(struct virtio_softc *sc, int status)
69 {
70 sc->sc_ops->set_status(sc, status);
71 }
72
73 /*
74 * Reset the device.
75 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() can still be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	virtio_negotiate_features(sc, requestedfeatures);
 *	    // check virtio_features() for the accepted set
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, features can only be renegotiated after a virtio_reset().
 */
87 void
88 virtio_reset(struct virtio_softc *sc)
89 {
90 virtio_device_reset(sc);
91 }
92
93 void
94 virtio_reinit_start(struct virtio_softc *sc)
95 {
96 int i;
97
98 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
99 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
100 for (i = 0; i < sc->sc_nvqs; i++) {
101 int n;
102 struct virtqueue *vq = &sc->sc_vqs[i];
103 n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
104 if (n == 0) /* vq disappeared */
105 continue;
106 if (n != vq->vq_num) {
107 panic("%s: virtqueue size changed, vq index %d\n",
108 device_xname(sc->sc_dev),
109 vq->vq_index);
110 }
111 virtio_init_vq(sc, vq, true);
112 sc->sc_ops->setup_queue(sc, vq->vq_index,
113 vq->vq_dmamap->dm_segs[0].ds_addr);
114 }
115 }
116
117 void
118 virtio_reinit_end(struct virtio_softc *sc)
119 {
120 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
121 }
122
123 /*
124 * Feature negotiation.
125 */
126 void
127 virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
128 {
129 if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
130 !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
131 guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
132 sc->sc_ops->neg_features(sc, guest_features);
133 if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
134 sc->sc_indirect = true;
135 else
136 sc->sc_indirect = false;
137 }
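
/*
 * Usage sketch (not part of this driver): a child normally hands its
 * requested feature bits to virtio_child_attach_start(), which calls
 * virtio_negotiate_features() on its behalf, and afterwards checks what
 * the host actually accepted:
 *
 *	if (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX)
 *		sc->sc_has_event_idx = true;	// hypothetical softc member
 *
 * Only bits present in virtio_features() may be relied upon; the host is
 * free to mask requested features.
 */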
138
139
140 /*
141 * Device configuration registers readers/writers
142 */
143 #if 0
144 #define DPRINTFR(n, fmt, val, index, num) \
145 printf("\n%s (", n); \
146 for (int i = 0; i < num; i++) \
147 printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
148 printf(") -> "); printf(fmt, val); printf("\n");
149 #define DPRINTFR2(n, fmt, val_s, val_n) \
150 printf("%s ", n); \
151 printf("\n stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
152 #else
153 #define DPRINTFR(n, fmt, val, index, num)
154 #define DPRINTFR2(n, fmt, val_s, val_n)
155 #endif
156
157
158 uint8_t
159 virtio_read_device_config_1(struct virtio_softc *sc, int index) {
160 bus_space_tag_t iot = sc->sc_devcfg_iot;
161 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
162 uint8_t val;
163
164 val = bus_space_read_1(iot, ioh, index);
165
166 DPRINTFR("read_1", "%02x", val, index, 1);
167 return val;
168 }
169
170 uint16_t
171 virtio_read_device_config_2(struct virtio_softc *sc, int index) {
172 bus_space_tag_t iot = sc->sc_devcfg_iot;
173 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
174 uint16_t val;
175
176 val = bus_space_read_2(iot, ioh, index);
177 if (BYTE_ORDER != sc->sc_bus_endian)
178 val = bswap16(val);
179
180 DPRINTFR("read_2", "%04x", val, index, 2);
181 DPRINTFR2("read_2", "%04x",
182 bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
183 bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
184 return val;
185 }
186
187 uint32_t
188 virtio_read_device_config_4(struct virtio_softc *sc, int index) {
189 bus_space_tag_t iot = sc->sc_devcfg_iot;
190 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
191 uint32_t val;
192
193 val = bus_space_read_4(iot, ioh, index);
194 if (BYTE_ORDER != sc->sc_bus_endian)
195 val = bswap32(val);
196
197 DPRINTFR("read_4", "%08x", val, index, 4);
198 DPRINTFR2("read_4", "%08x",
199 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
200 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
201 return val;
202 }
203
/*
 * The Virtio spec explicitly states that reading and writing 8 bytes is not
 * considered atomic and that no triggers may be connected to reading or
 * writing it. We access it using two 32 bit reads. See virtio spec 4.1.3.1.
 */
209 uint64_t
210 virtio_read_device_config_8(struct virtio_softc *sc, int index) {
211 bus_space_tag_t iot = sc->sc_devcfg_iot;
212 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
213 union {
214 uint64_t u64;
215 uint32_t l[2];
216 } v;
217 uint64_t val;
218
219 v.l[0] = bus_space_read_4(iot, ioh, index);
220 v.l[1] = bus_space_read_4(iot, ioh, index + 4);
221 if (sc->sc_bus_endian != sc->sc_struct_endian) {
222 v.l[0] = bswap32(v.l[0]);
223 v.l[1] = bswap32(v.l[1]);
224 }
225 val = v.u64;
226
227 if (BYTE_ORDER != sc->sc_struct_endian)
228 val = bswap64(val);
229
230 DPRINTFR("read_8", "%08lx", val, index, 8);
231 DPRINTFR2("read_8 low ", "%08x",
232 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
233 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
234 DPRINTFR2("read_8 high ", "%08x",
235 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4),
236 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
237 return val;
238 }
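
/*
 * Usage sketch: a child driver reads its device specific configuration
 * space through these accessors.  The offsets below are illustrative
 * only; real drivers use the offsets defined for their device type
 * (a block device's capacity field, for instance):
 *
 *	uint64_t capacity = virtio_read_device_config_8(vsc, 0);
 *	uint32_t blksize  = virtio_read_device_config_4(vsc, 8);
 */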
239
/*
 * In the older virtio spec, device config registers are host endian. In the
 * newer one they are little endian. Some newer devices however explicitly
 * specify their registers to always be little endian. These functions cater
 * for these cases.
 */
245 uint16_t
246 virtio_read_device_config_le_2(struct virtio_softc *sc, int index) {
247 bus_space_tag_t iot = sc->sc_devcfg_iot;
248 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
249 uint16_t val;
250
251 val = bus_space_read_2(iot, ioh, index);
252 if (sc->sc_bus_endian != LITTLE_ENDIAN)
253 val = bswap16(val);
254
255 DPRINTFR("read_le_2", "%04x", val, index, 2);
256 DPRINTFR2("read_le_2", "%04x",
257 bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
258 bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
259 return val;
260 }
261
262 uint32_t
263 virtio_read_device_config_le_4(struct virtio_softc *sc, int index) {
264 bus_space_tag_t iot = sc->sc_devcfg_iot;
265 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
266 uint32_t val;
267
268 val = bus_space_read_4(iot, ioh, index);
269 if (sc->sc_bus_endian != LITTLE_ENDIAN)
270 val = bswap32(val);
271
272 DPRINTFR("read_le_4", "%08x", val, index, 4);
273 DPRINTFR2("read_le_4", "%08x",
274 bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
275 bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
276 return val;
277 }
278
279 void
280 virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
281 {
282 bus_space_tag_t iot = sc->sc_devcfg_iot;
283 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
284
285 bus_space_write_1(iot, ioh, index, value);
286 }
287
288 void
289 virtio_write_device_config_2(struct virtio_softc *sc, int index, uint16_t value)
290 {
291 bus_space_tag_t iot = sc->sc_devcfg_iot;
292 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
293
294 if (BYTE_ORDER != sc->sc_bus_endian)
295 value = bswap16(value);
296 bus_space_write_2(iot, ioh, index, value);
297 }
298
299 void
300 virtio_write_device_config_4(struct virtio_softc *sc, int index, uint32_t value)
301 {
302 bus_space_tag_t iot = sc->sc_devcfg_iot;
303 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
304
305 if (BYTE_ORDER != sc->sc_bus_endian)
306 value = bswap32(value);
307 bus_space_write_4(iot, ioh, index, value);
308 }
309
/*
 * The Virtio spec explicitly states that reading and writing 8 bytes is not
 * considered atomic and that no triggers may be connected to reading or
 * writing it. We access it using two 32 bit writes. For good measure the
 * lsb is always written first, just in case of a hypervisor bug. See
 * virtio spec 4.1.3.1.
 */
317 void
318 virtio_write_device_config_8(struct virtio_softc *sc, int index, uint64_t value)
319 {
320 bus_space_tag_t iot = sc->sc_devcfg_iot;
321 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
322 union {
323 uint64_t u64;
324 uint32_t l[2];
325 } v;
326
327 if (BYTE_ORDER != sc->sc_struct_endian)
328 value = bswap64(value);
329
330 v.u64 = value;
331 if (sc->sc_bus_endian != sc->sc_struct_endian) {
332 v.l[0] = bswap32(v.l[0]);
333 v.l[1] = bswap32(v.l[1]);
334 }
335
336 if (sc->sc_struct_endian == LITTLE_ENDIAN) {
337 bus_space_write_4(iot, ioh, index, v.l[0]);
338 bus_space_write_4(iot, ioh, index + 4, v.l[1]);
339 } else {
340 bus_space_write_4(iot, ioh, index + 4, v.l[1]);
341 bus_space_write_4(iot, ioh, index, v.l[0]);
342 }
343 }
344
/*
 * In the older virtio spec, device config registers are host endian. In the
 * newer one they are little endian. Some newer devices however explicitly
 * specify their registers to always be little endian. These functions cater
 * for these cases.
 */
350 void
351 virtio_write_device_config_le_2(struct virtio_softc *sc, int index, uint16_t value)
352 {
353 bus_space_tag_t iot = sc->sc_devcfg_iot;
354 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
355
356 if (sc->sc_bus_endian != LITTLE_ENDIAN)
357 value = bswap16(value);
358 bus_space_write_2(iot, ioh, index, value);
359 }
360
361 void
362 virtio_write_device_config_le_4(struct virtio_softc *sc, int index, uint32_t value)
363 {
364 bus_space_tag_t iot = sc->sc_devcfg_iot;
365 bus_space_handle_t ioh = sc->sc_devcfg_ioh;
366
367 if (sc->sc_bus_endian != LITTLE_ENDIAN)
368 value = bswap32(value);
369 bus_space_write_4(iot, ioh, index, value);
370 }
371
372
373 /*
374 * data structures endian helpers
375 */
376 uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val)
377 {
378 KASSERT(sc);
379 return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
380 }
381
382 uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val)
383 {
384 KASSERT(sc);
385 return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
386 }
387
388 uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val)
389 {
390 KASSERT(sc);
391 return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
392 }
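
/*
 * Sketch: every field of the shared vring structures must go through
 * these helpers so that what lands in memory matches sc_struct_endian.
 * Illustrative only; the enqueue/dequeue code below does exactly this:
 *
 *	vq->vq_avail->ring[idx] = virtio_rw16(sc, slot);
 *	used_idx = virtio_rw16(sc, vq->vq_used->idx);
 */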
393
394
395 /*
396 * Interrupt handler.
397 */
398 static void
399 virtio_soft_intr(void *arg)
400 {
401 struct virtio_softc *sc = arg;
402
403 KASSERT(sc->sc_intrhand != NULL);
404
405 (sc->sc_intrhand)(sc);
406 }
407
408 /*
409 * dmamap sync operations for a virtqueue.
410 */
411 static inline void
412 vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
413 {
414 /* availoffset == sizeof(vring_desc)*vq_num */
415 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
416 ops);
417 }
418
419 static inline void
420 vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
421 {
422 uint16_t hdrlen = offsetof(struct vring_avail, ring);
423 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
424 hdrlen += sizeof(uint16_t);
425
426 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
427 vq->vq_availoffset,
			hdrlen + vq->vq_num * sizeof(uint16_t),
429 ops);
430 }
431
432 static inline void
433 vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
434 {
435 uint16_t hdrlen = offsetof(struct vring_used, ring);
436 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
437 hdrlen += sizeof(uint16_t);
438
439 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
440 vq->vq_usedoffset,
			hdrlen + vq->vq_num * sizeof(struct vring_used_elem),
442 ops);
443 }
444
445 static inline void
446 vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
447 int ops)
448 {
449 int offset = vq->vq_indirectoffset
450 + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;
451
452 bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
453 offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
454 ops);
455 }
456
457 /*
458 * Can be used as sc_intrhand.
459 */
/*
 * Scan the vq, do bus_dmamap_sync for the vq itself (not for the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
464 bool
465 virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
466 {
467
468 if (vq->vq_queued) {
469 vq->vq_queued = 0;
470 vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
471 }
472 vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
473 membar_consumer();
474
475 return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
476 }
477
478 int
479 virtio_vq_intr(struct virtio_softc *sc)
480 {
481 struct virtqueue *vq;
482 int i, r = 0;
483
484 for (i = 0; i < sc->sc_nvqs; i++) {
485 vq = &sc->sc_vqs[i];
486 if (virtio_vq_is_enqueued(sc, vq) == 1) {
487 if (vq->vq_done)
488 r |= (vq->vq_done)(vq);
489 }
490 }
491
492 return r;
493 }
494
495 int
496 virtio_vq_intrhand(struct virtio_softc *sc)
497 {
498 struct virtqueue *vq;
499 int i, r = 0;
500
501 for (i = 0; i < sc->sc_nvqs; i++) {
502 vq = &sc->sc_vqs[i];
503 r |= (vq->vq_intrhand)(vq->vq_intrhand_arg);
504 }
505
506 return r;
507 }
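
/*
 * Dispatch sketch: a child driver that wants per-queue callbacks sets
 * vq_done on each virtqueue after allocating it and passes virtio_vq_intr
 * as the intr_hand argument of virtio_child_attach_start(); foo_rx_done
 * and foo_tx_done are hypothetical:
 *
 *	sc->sc_vq[0].vq_done = foo_rx_done;
 *	sc->sc_vq[1].vq_done = foo_tx_done;
 */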
508
509
510 /*
511 * Increase the event index in order to delay interrupts.
512 */
513 int
514 virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
515 uint16_t nslots)
516 {
517 uint16_t idx, nused;
518
519 idx = vq->vq_used_idx + nslots;
520
521 /* set the new event index: avail_ring->used_event = idx */
522 *vq->vq_used_event = virtio_rw16(sc, idx);
523 membar_producer();
524
525 vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
526 vq->vq_queued++;
527
528 nused = (uint16_t)
529 (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
530 KASSERT(nused <= vq->vq_num);
531
532 return nslots < nused;
533 }
534
535 /*
536 * Postpone interrupt until 3/4 of the available descriptors have been
537 * consumed.
538 */
539 int
540 virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
541 {
542 uint16_t nslots;
543
544 nslots = (uint16_t)
545 (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;
546
547 return virtio_postpone_intr(sc, vq, nslots);
548 }
549
550 /*
551 * Postpone interrupt until all of the available descriptors have been
552 * consumed.
553 */
554 int
555 virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
556 {
557 uint16_t nslots;
558
559 nslots = (uint16_t)
560 (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);
561
562 return virtio_postpone_intr(sc, vq, nslots);
563 }
564
565 /*
566 * Start/stop vq interrupt. No guarantee.
567 */
568 void
569 virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
570 {
571 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * There is no way to disable the interrupt completely with
		 * RingEventIdx. Instead advance used_event by half the
		 * possible value. This won't happen soon and is far enough
		 * in the past to not trigger a spurious interrupt.
		 */
578 *vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
579 } else {
580 vq->vq_avail->flags |= virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
581 }
582 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
583 vq->vq_queued++;
584 }
585
586 int
587 virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
588 {
589 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * If the event index feature is negotiated, interrupts are
		 * enabled by writing the latest consumed index into the
		 * used_event field.
		 */
595 *vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
596 } else {
597 vq->vq_avail->flags &= ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
598 }
599 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
600 vq->vq_queued++;
601
602 return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
603 }
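
/*
 * Typical pattern (sketch): a driver's done handler keeps the queue
 * interrupt off while it drains the used ring, then re-checks after
 * re-enabling, since virtio_start_vq_intr() reports whether new entries
 * arrived in the meantime:
 *
 *	virtio_stop_vq_intr(vsc, vq);
 *	for (;;) {
 *		// drain with virtio_dequeue()/virtio_dequeue_commit()
 *		if (virtio_start_vq_intr(vsc, vq) == 0)
 *			break;
 *		virtio_stop_vq_intr(vsc, vq);
 *	}
 */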
604
605 /*
606 * Initialize vq structure.
607 */
608 static void
609 virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
610 const bool reinit)
611 {
612 int i, j;
613 int vq_size = vq->vq_num;
614
615 memset(vq->vq_vaddr, 0, vq->vq_bytesize);
616
617 /* build the indirect descriptor chain */
618 if (vq->vq_indirect != NULL) {
619 struct vring_desc *vd;
620
621 for (i = 0; i < vq_size; i++) {
622 vd = vq->vq_indirect;
623 vd += vq->vq_maxnsegs * i;
624 for (j = 0; j < vq->vq_maxnsegs-1; j++) {
625 vd[j].next = virtio_rw16(sc, j + 1);
626 }
627 }
628 }
629
630 /* free slot management */
631 SIMPLEQ_INIT(&vq->vq_freelist);
632 for (i = 0; i < vq_size; i++) {
633 SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
634 &vq->vq_entries[i], qe_list);
635 vq->vq_entries[i].qe_index = i;
636 }
637 if (!reinit)
638 mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);
639
640 /* enqueue/dequeue status */
641 vq->vq_avail_idx = 0;
642 vq->vq_used_idx = 0;
643 vq->vq_queued = 0;
644 if (!reinit) {
645 mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
646 mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
647 }
648 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
649 vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
650 vq->vq_queued++;
651 }
652
653 /*
654 * Allocate/free a vq.
655 */
656 int
657 virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
658 int maxsegsize, int maxnsegs, const char *name)
659 {
660 int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
661 int rsegs, r, hdrlen;
662 #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1))& \
663 ~(VIRTIO_PAGE_SIZE-1))
664
665 /* Make sure callers allocate vqs in order */
666 KASSERT(sc->sc_nvqs == index);
667
668 memset(vq, 0, sizeof(*vq));
669
670 vq_size = sc->sc_ops->read_queue_size(sc, index);
671 if (vq_size == 0) {
672 aprint_error_dev(sc->sc_dev,
673 "virtqueue not exist, index %d for %s\n",
674 index, name);
675 goto err;
676 }
677
678 hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;
679
680 /* allocsize1: descriptor table + avail ring + pad */
681 allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
682 + sizeof(uint16_t)*(hdrlen + vq_size));
683 /* allocsize2: used ring + pad */
684 allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
685 + sizeof(struct vring_used_elem)*vq_size);
686 /* allocsize3: indirect table */
687 if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
688 allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
689 else
690 allocsize3 = 0;
691 allocsize = allocsize1 + allocsize2 + allocsize3;
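
	/*
	 * Worked example (illustrative numbers only): with vq_size = 256,
	 * EVENT_IDX negotiated (hdrlen = 3) and maxnsegs = 8 this gives
	 * allocsize1 = align(16*256 + 2*(3+256)) = 8192,
	 * allocsize2 = align(2*3 + 8*256)        = 4096,
	 * allocsize3 = 16*8*256                  = 32768,
	 * i.e. 45056 bytes of DMA memory for the whole virtqueue.
	 */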
692
693 /* alloc and map the memory */
694 r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
695 &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
696 if (r != 0) {
697 aprint_error_dev(sc->sc_dev,
698 "virtqueue %d for %s allocation failed, "
699 "error code %d\n", index, name, r);
700 goto err;
701 }
702 r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
703 &vq->vq_vaddr, BUS_DMA_NOWAIT);
704 if (r != 0) {
705 aprint_error_dev(sc->sc_dev,
706 "virtqueue %d for %s map failed, "
707 "error code %d\n", index, name, r);
708 goto err;
709 }
710 r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
711 BUS_DMA_NOWAIT, &vq->vq_dmamap);
712 if (r != 0) {
713 aprint_error_dev(sc->sc_dev,
714 "virtqueue %d for %s dmamap creation failed, "
715 "error code %d\n", index, name, r);
716 goto err;
717 }
718 r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
719 vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
720 if (r != 0) {
721 aprint_error_dev(sc->sc_dev,
722 "virtqueue %d for %s dmamap load failed, "
723 "error code %d\n", index, name, r);
724 goto err;
725 }
726
727 /* remember addresses and offsets for later use */
728 vq->vq_owner = sc;
729 vq->vq_num = vq_size;
730 vq->vq_index = index;
731 vq->vq_desc = vq->vq_vaddr;
732 vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
733 vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
734 vq->vq_used_event = (uint16_t *) ((char *)vq->vq_avail +
735 offsetof(struct vring_avail, ring[vq->vq_num]));
736 vq->vq_usedoffset = allocsize1;
737 vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
738 vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
739 offsetof(struct vring_used, ring[vq->vq_num]));
740
741 if (allocsize3 > 0) {
742 vq->vq_indirectoffset = allocsize1 + allocsize2;
743 vq->vq_indirect = (void*)(((char*)vq->vq_desc)
744 + vq->vq_indirectoffset);
745 }
746 vq->vq_bytesize = allocsize;
747 vq->vq_maxsegsize = maxsegsize;
748 vq->vq_maxnsegs = maxnsegs;
749
750 /* free slot management */
751 vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
752 KM_SLEEP);
753 virtio_init_vq(sc, vq, false);
754
755 /* set the vq address */
756 sc->sc_ops->setup_queue(sc, index,
757 vq->vq_dmamap->dm_segs[0].ds_addr);
758
759 aprint_verbose_dev(sc->sc_dev,
760 "allocated %u byte for virtqueue %d for %s, "
761 "size %d\n", allocsize, index, name, vq_size);
762 if (allocsize3 > 0)
763 aprint_verbose_dev(sc->sc_dev,
764 "using %d byte (%d entries) "
765 "indirect descriptors\n",
766 allocsize3, maxnsegs * vq_size);
767
768 sc->sc_nvqs++;
769
770 return 0;
771
772 err:
773 sc->sc_ops->setup_queue(sc, index, 0);
774 if (vq->vq_dmamap)
775 bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
776 if (vq->vq_vaddr)
777 bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
778 if (vq->vq_segs[0].ds_addr)
779 bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
780 memset(vq, 0, sizeof(*vq));
781
782 return -1;
783 }
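
/*
 * Allocation sketch (hypothetical values): the index must follow the
 * order of allocation, maxsegsize/maxnsegs describe the largest request
 * the driver will ever enqueue, and the name shows up in autoconf output:
 *
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS,
 *	    MAXPHYS / NBPG + 2, "foo requests") != 0)
 *		goto err;
 *
 * The queue is released again with virtio_free_vq() below, typically
 * from the child's detach routine.
 */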
784
785 int
786 virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
787 {
788 struct vq_entry *qe;
789 int i = 0;
790
791 /* device must be already deactivated */
792 /* confirm the vq is empty */
793 SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
794 i++;
795 }
796 if (i != vq->vq_num) {
797 printf("%s: freeing non-empty vq, index %d\n",
798 device_xname(sc->sc_dev), vq->vq_index);
799 return EBUSY;
800 }
801
802 /* tell device that there's no virtqueue any longer */
803 sc->sc_ops->setup_queue(sc, vq->vq_index, 0);
804
805 kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
806 bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
807 bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
808 bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
809 bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
810 mutex_destroy(&vq->vq_freelist_lock);
811 mutex_destroy(&vq->vq_uring_lock);
812 mutex_destroy(&vq->vq_aring_lock);
813 memset(vq, 0, sizeof(*vq));
814
815 sc->sc_nvqs--;
816
817 return 0;
818 }
819
820 /*
821 * Free descriptor management.
822 */
823 static struct vq_entry *
824 vq_alloc_entry(struct virtqueue *vq)
825 {
826 struct vq_entry *qe;
827
828 mutex_enter(&vq->vq_freelist_lock);
829 if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
830 mutex_exit(&vq->vq_freelist_lock);
831 return NULL;
832 }
833 qe = SIMPLEQ_FIRST(&vq->vq_freelist);
834 SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
835 mutex_exit(&vq->vq_freelist_lock);
836
837 return qe;
838 }
839
840 static void
841 vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
842 {
843 mutex_enter(&vq->vq_freelist_lock);
844 SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
845 mutex_exit(&vq->vq_freelist_lock);
846
847 return;
848 }
849
850 /*
851 * Enqueue several dmamaps as a single request.
852 */
853 /*
854 * Typical usage:
 * <queue size> copies of each of the following are kept in arrays:
856 * - command blocks (in dmamem) should be pre-allocated and mapped
857 * - dmamaps for command blocks should be pre-allocated and loaded
858 * - dmamaps for payload should be pre-allocated
859 * r = virtio_enqueue_prep(sc, vq, &slot); // allocate a slot
860 * if (r) // currently 0 or EAGAIN
861 * return r;
862 * r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
863 * if (r) {
864 * virtio_enqueue_abort(sc, vq, slot);
865 * return r;
866 * }
867 * r = virtio_enqueue_reserve(sc, vq, slot,
868 * dmamap_payload[slot]->dm_nsegs+1);
869 * // ^ +1 for command
870 * if (r) { // currently 0 or EAGAIN
871 * bus_dmamap_unload(dmat, dmamap_payload[slot]);
872 * return r; // do not call abort()
873 * }
874 * <setup and prepare commands>
875 * bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
876 * bus_dmamap_sync(dmat, dmamap_payload[slot],...);
877 * virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
878 * virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
879 * virtio_enqueue_commit(sc, vq, slot, true);
880 */
881
882 /*
883 * enqueue_prep: allocate a slot number
884 */
885 int
886 virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
887 {
888 struct vq_entry *qe1;
889
890 KASSERT(slotp != NULL);
891
892 qe1 = vq_alloc_entry(vq);
893 if (qe1 == NULL)
894 return EAGAIN;
895 /* next slot is not allocated yet */
896 qe1->qe_next = -1;
897 *slotp = qe1->qe_index;
898
899 return 0;
900 }
901
902 /*
903 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
904 */
905 int
906 virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
907 int slot, int nsegs)
908 {
909 int indirect;
910 struct vq_entry *qe1 = &vq->vq_entries[slot];
911
912 KASSERT(qe1->qe_next == -1);
913 KASSERT(1 <= nsegs && nsegs <= vq->vq_num);
914
915 if ((vq->vq_indirect != NULL) &&
916 (nsegs >= MINSEG_INDIRECT) &&
917 (nsegs <= vq->vq_maxnsegs))
918 indirect = 1;
919 else
920 indirect = 0;
921 qe1->qe_indirect = indirect;
922
923 if (indirect) {
924 struct vring_desc *vd;
925 uint64_t addr;
926 int i;
927
928 vd = &vq->vq_desc[qe1->qe_index];
929 addr = vq->vq_dmamap->dm_segs[0].ds_addr
930 + vq->vq_indirectoffset;
931 addr += sizeof(struct vring_desc)
932 * vq->vq_maxnsegs * qe1->qe_index;
933 vd->addr = virtio_rw64(sc, addr);
934 vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
935 vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);
936
937 vd = vq->vq_indirect;
938 vd += vq->vq_maxnsegs * qe1->qe_index;
939 qe1->qe_desc_base = vd;
940
941 for (i = 0; i < nsegs-1; i++) {
942 vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
943 }
944 vd[i].flags = virtio_rw16(sc, 0);
945 qe1->qe_next = 0;
946
947 return 0;
948 } else {
949 struct vring_desc *vd;
950 struct vq_entry *qe;
951 int i, s;
952
953 vd = &vq->vq_desc[0];
954 qe1->qe_desc_base = vd;
955 qe1->qe_next = qe1->qe_index;
956 s = slot;
957 for (i = 0; i < nsegs - 1; i++) {
958 qe = vq_alloc_entry(vq);
959 if (qe == NULL) {
960 vd[s].flags = virtio_rw16(sc, 0);
961 virtio_enqueue_abort(sc, vq, slot);
962 return EAGAIN;
963 }
964 vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
965 vd[s].next = virtio_rw16(sc, qe->qe_index);
966 s = qe->qe_index;
967 }
968 vd[s].flags = virtio_rw16(sc, 0);
969
970 return 0;
971 }
972 }
973
974 /*
975 * enqueue: enqueue a single dmamap.
976 */
977 int
978 virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
979 bus_dmamap_t dmamap, bool write)
980 {
981 struct vq_entry *qe1 = &vq->vq_entries[slot];
982 struct vring_desc *vd = qe1->qe_desc_base;
983 int i;
984 int s = qe1->qe_next;
985
986 KASSERT(s >= 0);
987 KASSERT(dmamap->dm_nsegs > 0);
988
989 for (i = 0; i < dmamap->dm_nsegs; i++) {
990 vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
991 vd[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
992 if (!write)
993 vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
994 s = virtio_rw16(sc, vd[s].next);
995 }
996 qe1->qe_next = s;
997
998 return 0;
999 }
1000
1001 int
1002 virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
1003 bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
1004 bool write)
1005 {
1006 struct vq_entry *qe1 = &vq->vq_entries[slot];
1007 struct vring_desc *vd = qe1->qe_desc_base;
1008 int s = qe1->qe_next;
1009
1010 KASSERT(s >= 0);
1011 KASSERT(dmamap->dm_nsegs == 1); /* XXX */
1012 KASSERT((dmamap->dm_segs[0].ds_len > start) &&
1013 (dmamap->dm_segs[0].ds_len >= start + len));
1014
1015 vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
1016 vd[s].len = virtio_rw32(sc, len);
1017 if (!write)
1018 vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
1019 qe1->qe_next = virtio_rw16(sc, vd[s].next);
1020
1021 return 0;
1022 }
1023
1024 /*
1025 * enqueue_commit: add it to the aring.
1026 */
1027 int
1028 virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
1029 bool notifynow)
1030 {
1031 struct vq_entry *qe1;
1032
1033 if (slot < 0) {
1034 mutex_enter(&vq->vq_aring_lock);
1035 goto notify;
1036 }
1037 vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
1038 qe1 = &vq->vq_entries[slot];
1039 if (qe1->qe_indirect)
1040 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
1041 mutex_enter(&vq->vq_aring_lock);
1042 vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
1043 virtio_rw16(sc, slot);
1044
1045 notify:
1046 if (notifynow) {
1047 uint16_t o, n, t;
1048 uint16_t flags;
1049 o = virtio_rw16(sc, vq->vq_avail->idx);
1050 n = vq->vq_avail_idx;
1051
1052 /* publish avail idx */
1053 membar_producer();
1054 vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
1055 vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
1056 vq->vq_queued++;
1057
1058 membar_consumer();
1059 vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
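		/*
		 * Event index notification rule: with o = old avail idx,
		 * n = new avail idx and t = avail_event + 1, the unsigned
		 * test (n - t) < (n - o) is true exactly when avail_event
		 * lies in [o, n), i.e. the device asked to be kicked for
		 * one of the entries just published.  Example: o = 10,
		 * n = 13, avail_event = 11 gives 1 < 3, so kick.
		 */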
1060 if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
1061 t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
1062 if ((uint16_t) (n - t) < (uint16_t) (n - o))
1063 sc->sc_ops->kick(sc, vq->vq_index);
1064 } else {
1065 flags = virtio_rw16(sc, vq->vq_used->flags);
1066 if (!(flags & VRING_USED_F_NO_NOTIFY))
1067 sc->sc_ops->kick(sc, vq->vq_index);
1068 }
1069 vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
1070 vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
1071 }
1072 mutex_exit(&vq->vq_aring_lock);
1073
1074 return 0;
1075 }
1076
1077 /*
1078 * enqueue_abort: rollback.
1079 */
1080 int
1081 virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
1082 {
1083 struct vq_entry *qe = &vq->vq_entries[slot];
1084 struct vring_desc *vd;
1085 int s;
1086
1087 if (qe->qe_next < 0) {
1088 vq_free_entry(vq, qe);
1089 return 0;
1090 }
1091
1092 s = slot;
1093 vd = &vq->vq_desc[0];
1094 while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
1095 s = virtio_rw16(sc, vd[s].next);
1096 vq_free_entry(vq, qe);
1097 qe = &vq->vq_entries[s];
1098 }
1099 vq_free_entry(vq, qe);
1100 return 0;
1101 }
1102
1103 /*
1104 * Dequeue a request.
1105 */
1106 /*
1107 * dequeue: dequeue a request from uring; dmamap_sync for uring is
1108 * already done in the interrupt handler.
1109 */
1110 int
1111 virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
1112 int *slotp, int *lenp)
1113 {
1114 uint16_t slot, usedidx;
1115 struct vq_entry *qe;
1116
1117 if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
1118 return ENOENT;
1119 mutex_enter(&vq->vq_uring_lock);
1120 usedidx = vq->vq_used_idx++;
1121 mutex_exit(&vq->vq_uring_lock);
1122 usedidx %= vq->vq_num;
1123 slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
1124 qe = &vq->vq_entries[slot];
1125
1126 if (qe->qe_indirect)
1127 vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);
1128
1129 if (slotp)
1130 *slotp = slot;
1131 if (lenp)
1132 *lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);
1133
1134 return 0;
1135 }
1136
/*
 * dequeue_commit: complete the dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
1141 int
1142 virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
1143 {
1144 struct vq_entry *qe = &vq->vq_entries[slot];
1145 struct vring_desc *vd = &vq->vq_desc[0];
1146 int s = slot;
1147
1148 while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
1149 s = virtio_rw16(sc, vd[s].next);
1150 vq_free_entry(vq, qe);
1151 qe = &vq->vq_entries[s];
1152 }
1153 vq_free_entry(vq, qe);
1154
1155 return 0;
1156 }
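
/*
 * Drain sketch for a done handler (dmat and dmamap_payload[] are
 * hypothetical driver state, see the enqueue example above):
 *
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(dmat, dmamap_payload[slot], 0, len,
 *		    BUS_DMASYNC_POSTREAD);
 *		// ... hand the payload to the upper layer ...
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		virtio_dequeue_commit(vsc, vq, slot);
 *	}
 */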
1157
1158 /*
1159 * Attach a child, fill all the members.
1160 */
1161 void
1162 virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
1163 struct virtqueue *vqs,
1164 virtio_callback config_change,
1165 virtio_callback intr_hand,
1166 int req_flags, int req_features, const char *feat_bits)
1167 {
1168 char buf[1024];
1169
1170 sc->sc_child = child;
1171 sc->sc_ipl = ipl;
1172 sc->sc_vqs = vqs;
1173 sc->sc_config_change = config_change;
1174 sc->sc_intrhand = intr_hand;
1175 sc->sc_flags = req_flags;
1176
1177 virtio_negotiate_features(sc, req_features);
1178 snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
1179 aprint_normal(": features: %s\n", buf);
1180 aprint_naive("\n");
1181 }
1182
1183 void
1184 virtio_child_attach_set_vqs(struct virtio_softc *sc,
1185 struct virtqueue *vqs, int nvq_pairs)
1186 {
1187
1188 KASSERT(nvq_pairs == 1 ||
1189 (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0);
1190 if (nvq_pairs > 1)
1191 sc->sc_child_mq = true;
1192
1193 sc->sc_vqs = vqs;
1194 }
1195
1196 int
1197 virtio_child_attach_finish(struct virtio_softc *sc)
1198 {
1199 int r;
1200
1201 sc->sc_finished_called = true;
1202 r = sc->sc_ops->setup_interrupts(sc);
1203 if (r != 0) {
1204 aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
1205 goto fail;
1206 }
1207
1208 KASSERT(sc->sc_soft_ih == NULL);
1209 if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
1210 u_int flags = SOFTINT_NET;
1211 if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
1212 flags |= SOFTINT_MPSAFE;
1213
1214 sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
1215 if (sc->sc_soft_ih == NULL) {
1216 sc->sc_ops->free_interrupts(sc);
1217 aprint_error_dev(sc->sc_dev,
1218 "failed to establish soft interrupt\n");
1219 goto fail;
1220 }
1221 }
1222
1223 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
1224 return 0;
1225
1226 fail:
1227 if (sc->sc_soft_ih) {
1228 softint_disestablish(sc->sc_soft_ih);
1229 sc->sc_soft_ih = NULL;
1230 }
1231
1232 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
1233 return 1;
1234 }
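
/*
 * Attach flow sketch for a hypothetical child driver ("foo"); error
 * handling is abbreviated:
 *
 *	virtio_child_attach_start(vsc, self, IPL_BIO, sc->sc_vq,
 *	    foo_config_change, virtio_vq_intr, 0,
 *	    FOO_FEATURES, FOO_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS, 2, "foo") != 0 ||
 *	    virtio_child_attach_finish(vsc) != 0) {
 *		virtio_child_attach_failed(vsc);
 *		return;
 *	}
 */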
1235
1236 void
1237 virtio_child_detach(struct virtio_softc *sc)
1238 {
1239 sc->sc_child = NULL;
1240 sc->sc_vqs = NULL;
1241
1242 virtio_device_reset(sc);
1243
1244 sc->sc_ops->free_interrupts(sc);
1245
1246 if (sc->sc_soft_ih) {
1247 softint_disestablish(sc->sc_soft_ih);
1248 sc->sc_soft_ih = NULL;
1249 }
1250 }
1251
1252 void
1253 virtio_child_attach_failed(struct virtio_softc *sc)
1254 {
1255 virtio_child_detach(sc);
1256
1257 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
1258
1259 sc->sc_child = VIRTIO_CHILD_FAILED;
1260 }
1261
1262 bus_dma_tag_t
1263 virtio_dmat(struct virtio_softc *sc)
1264 {
1265 return sc->sc_dmat;
1266 }
1267
1268 device_t
1269 virtio_child(struct virtio_softc *sc)
1270 {
1271 return sc->sc_child;
1272 }
1273
1274 int
1275 virtio_intrhand(struct virtio_softc *sc)
1276 {
1277 return (sc->sc_intrhand)(sc);
1278 }
1279
1280 uint64_t
1281 virtio_features(struct virtio_softc *sc)
1282 {
1283 return sc->sc_active_features;
1284 }
1285
1286 int
1287 virtio_attach_failed(struct virtio_softc *sc)
1288 {
1289 device_t self = sc->sc_dev;
1290
	/* no error message if nothing is connected, but it still counts as failed */
1292 if (sc->sc_childdevid == 0)
1293 return 1;
1294
1295 if (sc->sc_child == NULL) {
1296 aprint_error_dev(self,
1297 "no matching child driver; not configured\n");
1298 return 1;
1299 }
1300
1301 if (sc->sc_child == VIRTIO_CHILD_FAILED) {
1302 aprint_error_dev(self, "virtio configuration failed\n");
1303 return 1;
1304 }
1305
1306 /* sanity check */
1307 if (!sc->sc_finished_called) {
1308 aprint_error_dev(self, "virtio internal error, child driver "
1309 "signaled OK but didn't initialize interrupts\n");
1310 return 1;
1311 }
1312
1313 return 0;
1314 }
1315
1316 void
1317 virtio_print_device_type(device_t self, int id, int revision)
1318 {
1319 aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
1320 (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
1321 revision);
1322 }
1323
1324
1325 MODULE(MODULE_CLASS_DRIVER, virtio, NULL);
1326
1327 #ifdef _MODULE
1328 #include "ioconf.c"
1329 #endif
1330
1331 static int
1332 virtio_modcmd(modcmd_t cmd, void *opaque)
1333 {
1334 int error = 0;
1335
1336 #ifdef _MODULE
1337 switch (cmd) {
1338 case MODULE_CMD_INIT:
1339 error = config_init_component(cfdriver_ioconf_virtio,
1340 cfattach_ioconf_virtio, cfdata_ioconf_virtio);
1341 break;
1342 case MODULE_CMD_FINI:
1343 error = config_fini_component(cfdriver_ioconf_virtio,
1344 cfattach_ioconf_virtio, cfdata_ioconf_virtio);
1345 break;
1346 default:
1347 error = ENOTTY;
1348 break;
1349 }
1350 #endif
1351
1352 return error;
1353 }
1354