/*	$NetBSD: virtio.c,v 1.12 2015/10/27 23:08:27 christos Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.12 2015/10/27 23:08:27 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#define	MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static int	virtio_match(device_t, cfdata_t, void *);
static void	virtio_attach(device_t, device_t, void *);
static int	virtio_detach(device_t, int);
static int	virtio_intr(void *arg);
static int	virtio_msix_queue_intr(void *);
static int	virtio_msix_config_intr(void *);
static int	virtio_setup_msix_vectors(struct virtio_softc *);
static int	virtio_setup_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static void	virtio_soft_intr(void *arg);
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

CFATTACH_DECL3_NEW(virtio, sizeof(struct virtio_softc),
    virtio_match, virtio_attach, virtio_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static void
virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

#define virtio_device_reset(sc)	virtio_set_status((sc), 0)

static int
virtio_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		if ((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		     PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		     PCI_PRODUCT_QUMRANET_VIRTIO_103F))
			return 1;
		break;
	}

	return 0;
}

static const char *virtio_device_name[] = {
	"Unknown (0)",		/* 0 */
	"Network",		/* 1 */
	"Block",		/* 2 */
	"Console",		/* 3 */
	"Entropy",		/* 4 */
	"Memory Balloon",	/* 5 */
	"Unknown (6)",		/* 6 */
	"Unknown (7)",		/* 7 */
	"Unknown (8)",		/* 8 */
	"9P Transport"		/* 9 */
};
#define NDEVNAMES	(sizeof(virtio_device_name)/sizeof(char*))

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

static int
virtio_setup_msix_vectors(struct virtio_softc *sc)
{
	int offset, vector, ret, qid;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
	ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector)
		return -1;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector)
			return -1;
	}

	return 0;
}

static int
virtio_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;
	int idx;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx], IPL_NET,
	    virtio_msix_config_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx], IPL_NET,
	    virtio_msix_queue_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
		goto error;
	}

	if (virtio_setup_msix_vectors(sc) != 0) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);

	return -1;
}

static int
virtio_setup_intx_interrupt(struct virtio_softc *sc, struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_ihp[0],
	    IPL_NET, virtio_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, sc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_setup_interrupts(struct virtio_softc *sc, struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	int error;
	int nmsix;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;

	nmsix = pci_msix_count(pa->pa_pc, pa->pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_PCI_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = 2;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 2,
		    KM_SLEEP);
		if (sc->sc_ihs == NULL) {
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		error = virtio_setup_msix_interrupts(sc, pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 2);
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		sc->sc_ihs_num = 2;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	} else if (pci_intr_type(sc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 1,
		    KM_SLEEP);
		if (sc->sc_ihs == NULL) {
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		error = virtio_setup_intx_interrupt(sc, pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 1);
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		sc->sc_ihs_num = 1;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	}

	return 0;
}

static void
virtio_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	int r;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	/* subsystem ID shows what I am */
	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
	aprint_normal_dev(self, "Virtio %s Device (rev. 0x%02x)\n",
	    (PCI_SUBSYS_ID(id) < NDEVNAMES?
	     virtio_device_name[PCI_SUBSYS_ID(id)] : "Unknown"),
	    revision);

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc_iot = pa->pa_iot;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	sc->sc_childdevid = PCI_SUBSYS_ID(id);
	sc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return;
	}
	if (sc->sc_child == (void*)1) { /* (void*)1 means the child failed to configure */
		aprint_error_dev(self,
		    "virtio configuration failed\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	r = virtio_setup_interrupts(sc, pa);
	if (r != 0) {
		aprint_error_dev(self, "failed to setup interrupts\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_soft_ih = NULL;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL)
			aprint_error(": failed to establish soft interrupt\n");
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return;
}

static int
virtio_detach(device_t self, int flags)
{
	struct virtio_softc *sc = device_private(self);
	int r;
	int i;

	if (sc->sc_child != 0 && sc->sc_child != (void*)1) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(sc->sc_child == 0 || sc->sc_child == (void*)1);
	KASSERT(sc->sc_vqs == 0);
	for (i = 0; i < sc->sc_ihs_num; i++) {
		if (sc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
	}
	pci_intr_release(sc->sc_pc, sc->sc_ihp, sc->sc_ihs_num);
	kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * sc->sc_ihs_num);
	sc->sc_ihs_num = 0;
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() can still be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation can only be done after virtio_reset().
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT,
		    vq->vq_index);
		n = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS,
		    (vq->vq_dmamap->dm_segs[0].ds_addr
		     / VIRTIO_PAGE_SIZE));
	}

	/* MSI-X has more than one interrupt handle, whereas INTx has just one */
	if (sc->sc_ihs_num > 1) {
		if (virtio_setup_msix_vectors(sc) != 0) {
			aprint_error_dev(sc->sc_dev, "couldn't setup MSI-X vectors\n");
			return;
		}
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
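
/*
 * Example (a minimal sketch, not from any in-tree driver): a hypothetical
 * child driver would negotiate its feature bits from its own attach
 * routine, after virtio(4) has set the DRIVER status bit.  The feature
 * macros and the sc_has_foo member below are assumptions, not real names.
 *
 *	uint32_t features;
 *
 *	features = virtio_negotiate_features(vsc,
 *	    HYPOTHETICAL_F_FOO | HYPOTHETICAL_F_BAR);
 *	if (features & HYPOTHETICAL_F_FOO)
 *		sc->sc_has_foo = true;
 */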

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
	return r;
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index,
	    value & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + sizeof(uint32_t),
	    value >> 32);
}
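
/*
 * Example (sketch only): device-specific configuration fields are read and
 * written relative to sc_config_offset through the accessors above.  The
 * offsets below are hypothetical; real ones come from the device's own
 * register definitions.
 *
 *	uint64_t capacity;
 *
 *	capacity = virtio_read_device_config_8(vsc, HYP_CONFIG_CAPACITY);
 *	virtio_write_device_config_4(vsc, HYP_CONFIG_LIMIT, 128);
 */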

/*
 * Interrupt handler.
 */
static int
virtio_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	/* TODO: handle events */
	aprint_debug_dev(sc->sc_dev, "%s\n", __func__);
	return 1;
}

static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scans each virtqueue, does bus_dmamap_sync for the rings (not for the
 * payload), and calls (*vq_done)() if any entries have been consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}
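
/*
 * Example (a sketch under assumptions): a child driver that is happy with
 * the per-virtqueue dispatch above simply points sc_intrhand at
 * virtio_vq_intr and installs a vq_done callback for each queue;
 * hyp_vq_done is a hypothetical function.
 *
 *	vsc->sc_intrhand = virtio_vq_intr;
 *	sc->sc_vq.vq_done = hyp_vq_done;	// called when entries are used
 */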

/*
 * Start/stop vq interrupts.  This is only a hint; the host may still
 * deliver or suppress interrupts regardless.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
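
/*
 * Example (sketch): one common pattern in a vq_done callback is to suppress
 * further interrupts while draining the used ring, re-enable them, and then
 * drain once more to close the race with the host.  hyp_drain() is a
 * hypothetical helper that calls virtio_dequeue() until it returns ENOENT.
 *
 *	virtio_stop_vq_intr(vsc, vq);
 *	r = hyp_drain(sc, vq);
 *	virtio_start_vq_intr(vsc, vq);
 *	r |= hyp_drain(sc, vq);		// catch entries that raced in
 */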

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++)
				vd[j].next = j + 1;
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc,
    struct virtqueue *vq, int index, int maxsegsize, int maxnsegs,
    const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	memset(vq, 0, sizeof(*vq));

	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, index);
	vq_size = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
812 "virtqueue not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS,
	    (vq->vq_dmamap->dm_segs[0].ds_addr
	     / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %d bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) of "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);
	return 0;

err:
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
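
/*
 * Example (a minimal sketch): a child driver typically allocates its
 * virtqueues from its own attach routine, which virtio_attach() calls via
 * config_found() before setting up interrupts.  The queue index, segment
 * count and name below are hypothetical.
 *
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, MAXPHYS, HYP_MAXNSEGS,
 *	    "hypothetical request") != 0)
 *		goto err;
 *	sc->sc_vq.vq_done = hyp_vq_done;
 *	vsc->sc_nvqs = 1;
 */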

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  An array with <queue size> elements is kept for each of the following:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);	// allocate a slot
 *	if (r)					// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *				   dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {				// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;			// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_NOTIFY,
			    vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}
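
/*
 * Example (sketch): a vq_done callback usually loops with virtio_dequeue(),
 * syncs and unloads the payload dmamap, and then recycles the slot with
 * virtio_dequeue_commit().  dmamap_payload is a hypothetical per-slot array.
 *
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(vsc->sc_dmat, dmamap_payload[slot],
 *		    0, len, BUS_DMASYNC_POSTREAD);
 *		bus_dmamap_unload(vsc->sc_dmat, dmamap_payload[slot]);
 *		<handle the completed request>;
 *		virtio_dequeue_commit(vsc, vq, slot);
 *	}
 */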

/*
 * dequeue_commit: complete the dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}