/*	$NetBSD: virtio.c,v 1.51 2021/10/21 05:37:43 yamaguchi Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.51 2021/10/21 05:37:43 yamaguchi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

/* incomplete list */
static const char *virtio_device_name[] = {
	"unknown (0)",			/*  0 */
	"network",			/*  1 */
	"block",			/*  2 */
	"console",			/*  3 */
	"entropy",			/*  4 */
	"memory balloon",		/*  5 */
	"I/O memory",			/*  6 */
	"remote processor messaging",	/*  7 */
	"SCSI",				/*  8 */
	"9P transport",			/*  9 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is only allowed after a virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i, r;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr);
	}

	r = sc->sc_ops->setup_interrupts(sc, 1);
	if (r != 0) {
		printf("%s: failed to setup interrupts\n",
		    device_xname(sc->sc_dev));
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
void
virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
{
	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	sc->sc_ops->neg_features(sc, guest_features);
	if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;
}
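
/*
 * Example (illustrative sketch): the feature set actually accepted by the
 * host can be checked with virtio_features() after negotiation.  "sc_foo"
 * below is a hypothetical child driver softc, not part of this file:
 *
 *	virtio_negotiate_features(sc, VIRTIO_F_RING_EVENT_IDX);
 *	if (virtio_features(sc) & VIRTIO_F_RING_EVENT_IDX)
 *		sc_foo->sc_can_postpone_intr = true;
 */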


/*
 * Device configuration register readers/writers
 */
#if 0
#define DPRINTFR(n, fmt, val, index, num) \
	printf("\n%s (", n); \
	for (int i = 0; i < num; i++) \
		printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
	printf(") -> "); printf(fmt, val); printf("\n");
#define DPRINTFR2(n, fmt, val_s, val_n) \
	printf("%s ", n); \
	printf("\n stream "); printf(fmt, val_s); printf(" norm "); printf(fmt, val_n); printf("\n");
#else
#define DPRINTFR(n, fmt, val, index, num)
#define DPRINTFR2(n, fmt, val_s, val_n)
#endif


uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint8_t val;

	val = bus_space_read_1(iot, ioh, index);

	DPRINTFR("read_1", "%02x", val, index, 1);
	return val;
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap16(val);

	DPRINTFR("read_2", "%04x", val, index, 2);
	DPRINTFR2("read_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (BYTE_ORDER != sc->sc_bus_endian)
		val = bswap32(val);

	DPRINTFR("read_4", "%08x", val, index, 4);
	DPRINTFR2("read_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	return val;
}

/*
 * The virtio spec explicitly says that reads of and writes to 8-byte config
 * fields are not atomic and that no triggers may be connected to reading or
 * writing them.  We therefore access them as two 32-bit reads.  See virtio
 * spec 4.1.3.1.
 */
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;
	uint64_t val;

	v.l[0] = bus_space_read_4(iot, ioh, index);
	v.l[1] = bus_space_read_4(iot, ioh, index + 4);
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}
	val = v.u64;

	if (BYTE_ORDER != sc->sc_struct_endian)
		val = bswap64(val);

	DPRINTFR("read_8", "%08lx", val, index, 8);
	DPRINTFR2("read_8 low ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index));
	DPRINTFR2("read_8 high ", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index + 4));
	return val;
}

/*
 * In the older virtio spec, device config registers are host endian.  In the
 * newer one they are little endian.  Some newer devices however explicitly
 * specify their registers to always be little endian.  These functions cater
 * for these.
 */
uint16_t
virtio_read_device_config_le_2(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint16_t val;

	val = bus_space_read_2(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap16(val);

	DPRINTFR("read_le_2", "%04x", val, index, 2);
	DPRINTFR2("read_le_2", "%04x",
	    bus_space_read_stream_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_2(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}

uint32_t
virtio_read_device_config_le_4(struct virtio_softc *sc, int index) {
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	uint32_t val;

	val = bus_space_read_4(iot, ioh, index);
	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		val = bswap32(val);

	DPRINTFR("read_le_4", "%08x", val, index, 4);
	DPRINTFR2("read_le_4", "%08x",
	    bus_space_read_stream_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0),
	    bus_space_read_4(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, 0));
	return val;
}

void
virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	bus_space_write_1(iot, ioh, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (BYTE_ORDER != sc->sc_bus_endian)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}

/*
 * The virtio spec explicitly says that reads of and writes to 8-byte config
 * fields are not atomic and that no triggers may be connected to reading or
 * writing them.  We access them using two 32-bit writes.  For good measure
 * the spec also says to always write the least significant word first, just
 * in case of a hypervisor bug.  See virtio spec 4.1.3.1.
 */
void
virtio_write_device_config_8(struct virtio_softc *sc, int index, uint64_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;
	union {
		uint64_t u64;
		uint32_t l[2];
	} v;

	if (BYTE_ORDER != sc->sc_struct_endian)
		value = bswap64(value);

	v.u64 = value;
	if (sc->sc_bus_endian != sc->sc_struct_endian) {
		v.l[0] = bswap32(v.l[0]);
		v.l[1] = bswap32(v.l[1]);
	}

	if (sc->sc_struct_endian == LITTLE_ENDIAN) {
		bus_space_write_4(iot, ioh, index, v.l[0]);
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
	} else {
		bus_space_write_4(iot, ioh, index + 4, v.l[1]);
		bus_space_write_4(iot, ioh, index, v.l[0]);
	}
}

/*
 * In the older virtio spec, device config registers are host endian.  In the
 * newer one they are little endian.  Some newer devices however explicitly
 * specify their registers to always be little endian.  These functions cater
 * for these.
 */
void
virtio_write_device_config_le_2(struct virtio_softc *sc, int index, uint16_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap16(value);
	bus_space_write_2(iot, ioh, index, value);
}

void
virtio_write_device_config_le_4(struct virtio_softc *sc, int index, uint32_t value)
{
	bus_space_tag_t iot = sc->sc_devcfg_iot;
	bus_space_handle_t ioh = sc->sc_devcfg_ioh;

	if (sc->sc_bus_endian != LITTLE_ENDIAN)
		value = bswap32(value);
	bus_space_write_4(iot, ioh, index, value);
}


/*
 * data structure endian helpers
 */
uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap16(val) : val;
}

uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap32(val) : val;
}

uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val)
{
	KASSERT(sc);
	return BYTE_ORDER != sc->sc_struct_endian ? bswap64(val) : val;
}
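
/*
 * Example (illustrative sketch): every field of the vring structures is kept
 * in sc_struct_endian byte order, so all accesses are wrapped with these
 * helpers, e.g.:
 *
 *	vd->addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr);
 *	nused = virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx;
 */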


/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_avail, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    hdrlen + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	uint16_t hdrlen = offsetof(struct vring_used, ring);
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
		hdrlen += sizeof(uint16_t);

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    hdrlen + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan the vq, do bus_dmamap_sync for the vq itself (not for the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
bool
virtio_vq_is_enqueued(struct virtio_softc *sc, struct virtqueue *vq)
{

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();

	return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
}

int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (virtio_vq_is_enqueued(sc, vq) == 1) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

int
virtio_vq_intrhand(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		r |= (vq->vq_intrhand)(vq->vq_intrhand_arg);
	}

	return r;
}
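
/*
 * Example (illustrative sketch): a child driver that registers
 * virtio_vq_intr as its intr_hand gets its per-queue (*vq_done)() callback
 * invoked when entries have been consumed.  The "foo" names below are
 * hypothetical, not part of this file:
 *
 *	static int
 *	foo_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		struct foo_softc *sc = device_private(virtio_child(vsc));
 *		int slot, len;
 *
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			<sync dmamaps and complete the request in "slot">;
 *			virtio_dequeue_commit(vsc, vq, slot);
 *		}
 *		return 1;
 *	}
 */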


/*
 * Increase the event index in order to delay interrupts.
 */
int
virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
    uint16_t nslots)
{
	uint16_t idx, nused;

	idx = vq->vq_used_idx + nslots;

	/* set the new event index: avail_ring->used_event = idx */
	*vq->vq_used_event = virtio_rw16(sc, idx);
	membar_producer();

	vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	nused = (uint16_t)
	    (virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
	KASSERT(nused <= vq->vq_num);

	return nslots < nused;
}

/*
 * Postpone interrupt until 3/4 of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Postpone interrupt until all of the available descriptors have been
 * consumed.
 */
int
virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
{
	uint16_t nslots;

	nslots = (uint16_t)
	    (virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);

	return virtio_postpone_intr(sc, vq, nslots);
}

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * There is no way to disable the interrupt completely with
		 * RingEventIdx.  Instead advance used_event by half the
		 * possible value.  That index won't be reached soon and is
		 * far enough in the past not to trigger a spurious
		 * interrupt.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
	} else {
		vq->vq_avail->flags |= virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

int
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
		/*
		 * If the event index feature is negotiated, enabling
		 * interrupts is done by setting the latest consumed index
		 * in the used_event field.
		 */
		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
	} else {
		vq->vq_avail->flags &= ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;

	return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
}
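
/*
 * Example (illustrative sketch): a completion path that throttles interrupts
 * while it still has work pending and re-enables them only once the ring has
 * drained.  Everything except the virtio_* calls is hypothetical:
 *
 *	virtio_stop_vq_intr(vsc, vq);
 *	<process the available used entries>;
 *	if (virtio_start_vq_intr(vsc, vq))
 *		<new entries arrived meanwhile; process them too>;
 *
 * With VIRTIO_F_RING_EVENT_IDX negotiated, virtio_postpone_intr_smart() can
 * be used instead to request the next interrupt only after about 3/4 of the
 * outstanding descriptors have been consumed.
 */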

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = virtio_rw16(sc, j + 1);
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r, hdrlen;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue doesn't exist, index %d for %s\n",
		    index, name);
		goto err;
	}

	hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(hdrlen + vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
	    &vq->vq_vaddr, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_WAITOK, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_WAITOK);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_used_event = (uint16_t *) ((char *)vq->vq_avail +
	    offsetof(struct vring_avail, ring[vq->vq_num]));
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
	    offsetof(struct vring_used, ring[vq->vq_num]));

	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_SLEEP);
	virtio_init_vq(sc, vq, false);

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> of each of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *					// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		uint64_t addr;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->addr = virtio_rw64(sc, addr);
		vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
		vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].next = virtio_rw16(sc, VRING_DESC_F_NEXT);
		}
		vd[i].flags = virtio_rw16(sc, 0);
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = virtio_rw16(sc, 0);
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
			vd[s].next = virtio_rw16(sc, qe->qe_index);
			s = qe->qe_index;
		}
		vd[s].flags = virtio_rw16(sc, 0);

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
		vd[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
		if (!write)
			vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
		s = virtio_rw16(sc, vd[s].next);
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
	vd[s].len = virtio_rw32(sc, len);
	if (!write)
		vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
	qe1->qe_next = virtio_rw16(sc, vd[s].next);

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
	    virtio_rw16(sc, slot);

notify:
	if (notifynow) {
		uint16_t o, n, t;
		uint16_t flags;
		o = virtio_rw16(sc, vq->vq_avail->idx);
		n = vq->vq_avail_idx;

		/* publish avail idx */
		membar_producer();
		vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq->vq_queued++;

		membar_consumer();
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
			t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
			if ((uint16_t) (n - t) < (uint16_t) (n - o))
				sc->sc_ops->kick(sc, vq->vq_index);
		} else {
			flags = virtio_rw16(sc, vq->vq_used->flags);
			if (!(flags & VRING_USED_F_NO_NOTIFY))
				sc->sc_ops->kick(sc, vq->vq_index);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from the uring; the dmamap_sync for the uring
 * has already been done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 *                 If you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
		s = virtio_rw16(sc, vd[s].next);
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[1024];

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
	aprint_normal(": features: %s\n", buf);
	aprint_naive("\n");
}

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{

	KASSERT(nvq_pairs == 1 ||
	    (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0);
	if (nvq_pairs > 1)
		sc->sc_child_mq = true;

	sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	sc->sc_finished_called = true;
	r = sc->sc_ops->alloc_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to allocate interrupts\n");
		goto fail;
	}

	r = sc->sc_ops->setup_interrupts(sc, 0);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}
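
/*
 * Example (illustrative sketch): the usual attach sequence of a child
 * driver.  The "foo"/FOO_* names are hypothetical placeholders, not part of
 * this file:
 *
 *	virtio_child_attach_start(vsc, self, IPL_NET, &sc->sc_vq,
 *	    foo_config_change, virtio_vq_intr, 0,
 *	    FOO_FEATURES, FOO_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, FOO_MAXXFER, FOO_MAXSEGS,
 *	    "foo requests") != 0)
 *		goto err;
 *	sc->sc_vq.vq_done = foo_vq_done;
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		goto err;
 *	return;
 * err:
 *	virtio_child_attach_failed(vsc);
 */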

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint64_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_active_features;
}

int
virtio_attach_failed(struct virtio_softc *sc)
{
	device_t self = sc->sc_dev;

	/* no error if it's not connected, but it has failed */
	if (sc->sc_childdevid == 0)
		return 1;

	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return 1;
	}

	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
		aprint_error_dev(self, "virtio configuration failed\n");
		return 1;
	}

	/* sanity check */
	if (!sc->sc_finished_called) {
		aprint_error_dev(self, "virtio internal error, child driver "
		    "signaled OK but didn't initialize interrupts\n");
		return 1;
	}

	return 0;
}

void
virtio_print_device_type(device_t self, int id, int revision)
{
	aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
	    (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
	    revision);
}


MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}