/*	$NetBSD: virtio.c,v 1.39 2020/05/25 07:29:52 yamaguchi Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.39 2020/05/25 07:29:52 yamaguchi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is only possible again after a
 * virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = sc->sc_ops->neg_features(sc, guest_features);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
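
/*
 * Example (a sketch, not taken from any particular driver): a child
 * driver typically ORs its device-specific feature bits into the request
 * and then checks which ones the host actually accepted, e.g.
 *
 *	features = virtio_negotiate_features(sc,
 *	    MYDEV_F_SOME_FEATURE | MYDEV_F_OTHER_FEATURE);
 *	if (features & MYDEV_F_SOME_FEATURE)
 *		... enable the optional code path ...
 *
 * MYDEV_F_* are hypothetical device-specific bits; ring features such as
 * VIRTIO_F_RING_INDIRECT_DESC are requested by this function itself.
 * Most drivers have this call made on their behalf by
 * virtio_child_attach_start() below.
 */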

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_1(sc, index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_2(sc, index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_4(sc, index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_8(sc, index);
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	sc->sc_ops->write_dev_cfg_1(sc, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	sc->sc_ops->write_dev_cfg_2(sc, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	sc->sc_ops->write_dev_cfg_4(sc, index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	sc->sc_ops->write_dev_cfg_8(sc, index, value);
}
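
/*
 * Usage sketch: the index is a byte offset into the device-specific
 * configuration space, whose layout is defined per device class (the
 * offset names below are hypothetical):
 *
 *	uint64_t cap;
 *
 *	cap = virtio_read_device_config_8(sc, MYDEV_CONFIG_CAPACITY);
 *	virtio_write_device_config_1(sc, MYDEV_CONFIG_STATUS, 0);
 */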

/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan the vq, do bus_dmamap_sync for the vq rings (not for the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
static int
virtio_vq_intr_common(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int r = 0;

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();
	if (vq->vq_used_idx != vq->vq_used->idx) {
		if (vq->vq_done)
			r |= (vq->vq_done)(vq);
	}

	return r;
}

int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		r |= virtio_vq_intr_common(vq);
	}

	return r;
}

static int
virtio_vq_mq_intr(struct virtqueue *vq)
{

	return virtio_vq_intr_common(vq);
}

/*
 * Start/stop vq interrupt.  No guarantee: the flag is only a hint to the
 * host.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
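
/*
 * Because VRING_AVAIL_F_NO_INTERRUPT is only advisory, a driver that
 * polls with interrupts stopped typically re-checks the used ring once
 * after re-enabling them, to close the race with a completion that
 * arrived in between (a sketch, not a required pattern):
 *
 *	virtio_start_vq_intr(sc, vq);
 *	if (virtio_dequeue(sc, vq, &slot, &len) == 0) {
 *		virtio_stop_vq_intr(sc, vq);
 *		... process the entry and keep polling ...
 *	}
 */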

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = j + 1;
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_intrhand = virtio_vq_mq_intr;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_SLEEP);
	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %d bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) of "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}
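
/*
 * Attach-time usage sketch (names are illustrative, not from a real
 * driver): queues must be allocated in index order, and maxnsegs also
 * sizes the per-slot indirect descriptor tables when indirect
 * descriptors are negotiated.
 *
 *	if (virtio_alloc_vq(sc, &sc->sc_vq[0], 0, MAXPHYS,
 *	    MYDEV_MAXNSEGS, "requests") != 0)
 *		goto failed;
 *	sc->sc_vq[0].vq_done = mydev_vq_done;
 *
 * On detach, virtio_free_vq() succeeds only once every slot has been
 * returned via virtio_dequeue_commit() or virtio_enqueue_abort().
 */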

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> instances of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			sc->sc_ops->kick(sc, vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
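
/*
 * A vq_done handler typically drains the used ring in a loop and
 * recycles each slot (a sketch; the mydev_* names are illustrative):
 *
 *	static int
 *	mydev_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		int slot, len, more = 0;
 *
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			bus_dmamap_sync(...);	// payload POSTREAD/POSTWRITE
 *			... hand the completed request to the upper layer ...
 *			virtio_dequeue_commit(vsc, vq, slot);
 *			more = 1;
 *		}
 *		return more;
 *	}
 *
 * The return value is ORed together by virtio_vq_intr() above.
 */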

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[256];
	int features;

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	features = virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");
}

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{

	KASSERT(nvq_pairs == 1 ||
	    (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) == 0);
	if (nvq_pairs > 1)
		sc->sc_child_mq = true;

	sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	r = sc->sc_ops->setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}
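
/*
 * A child driver's attach path, in outline (a sketch; the mydev_* and
 * MYDEV_* names are illustrative, not a real driver):
 *
 *	struct virtio_softc *vsc = device_private(parent);
 *
 *	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
 *	    mydev_config_change, virtio_vq_intr, 0,
 *	    MYDEV_F_WANTED_FEATURES, MYDEV_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, ...) != 0)
 *		goto err;
 *	sc->sc_vq[0].vq_done = mydev_vq_done;
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		goto err;
 *	return;
 * err:
 *	virtio_child_attach_failed(vsc);
 */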

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint32_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_features;
}

int
virtiobusprint(void *aux, const char *pnp)
{
	struct virtio_attach_args * const va = aux;

	if (va->sc_childdevid == 0)
		return QUIET;	/* No device present */

	if (pnp)
		aprint_normal("Device ID %d at %s", va->sc_childdevid, pnp);

	return UNCONF;
}

MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}