/*	$NetBSD: virtio.c,v 1.35.2.1 2019/06/10 22:07:27 christos Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.35.2.1 2019/06/10 22:07:27 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}
/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is only allowed again after a
 * virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = sc->sc_ops->neg_features(sc, guest_features);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
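
/*
 * Illustrative sketch only: child drivers normally negotiate through
 * virtio_child_attach_start() below, but a direct call looks like this.
 * The requested mask and the fallback are assumptions, not taken from any
 * in-tree driver:
 *
 *	uint32_t features;
 *
 *	features = virtio_negotiate_features(vsc, VIRTIO_F_RING_INDIRECT_DESC);
 *	if ((features & VIRTIO_F_RING_INDIRECT_DESC) == 0)
 *		; // host refused; fall back to direct descriptors
 */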

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_1(sc, index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_2(sc, index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_4(sc, index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_8(sc, index);
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	return sc->sc_ops->write_dev_cfg_1(sc, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	return sc->sc_ops->write_dev_cfg_2(sc, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	return sc->sc_ops->write_dev_cfg_4(sc, index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	return sc->sc_ops->write_dev_cfg_8(sc, index, value);
}
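
/*
 * Illustrative sketch only: how a child driver might read a field from
 * device configuration space.  The offset macro here is hypothetical;
 * real offsets come from the device-specific header:
 *
 *	uint64_t capacity;
 *
 *	capacity = virtio_read_device_config_8(vsc, MYDEV_CONFIG_CAPACITY);
 */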

/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

static void
virtio_vq_soft_intr(void *arg)
{
	struct virtqueue *vq = arg;

	KASSERT(vq->vq_intrhand != NULL);

	(vq->vq_intrhand)(vq);
}

static int
virtio_vq_softint_establish(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int qid;
	u_int flags;

	flags = SOFTINT_NET;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		flags |= SOFTINT_MPSAFE;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vq = &sc->sc_vqs[qid];
		vq->vq_soft_ih =
		    softint_establish(flags, virtio_vq_soft_intr, vq);
		if (vq->vq_soft_ih == NULL)
			return -1;
	}

	return 0;
}

static void
virtio_vq_softint_disestablish(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int qid;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vq = &sc->sc_vqs[qid];
		if (vq->vq_soft_ih == NULL)
			continue;

		softint_disestablish(vq->vq_soft_ih);
		vq->vq_soft_ih = NULL;
	}
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scan the vq, bus_dmamap_sync for the vq (not for the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
static int
virtio_vq_intr_common(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int r = 0;

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();
	if (vq->vq_used_idx != vq->vq_used->idx) {
		if (vq->vq_done)
			r |= (vq->vq_done)(vq);
	}

	return r;
}

int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		r |= virtio_vq_intr_common(vq);
	}

	return r;
}

static int
virtio_vq_mq_intr(struct virtqueue *vq)
{

	return virtio_vq_intr_common(vq);
}

/*
 * Start/stop vq interrupts.  Best effort only: the flag is a hint to the
 * host and suppression is not guaranteed.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
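
/*
 * A minimal sketch (not from this file) of the usual mitigation pattern in
 * a vq_done handler: mask vq interrupts while draining, re-enable, then
 * check once more to close the race where the host posts a buffer between
 * the drain and the re-enable:
 *
 *	virtio_stop_vq_intr(sc, vq);
 *	// ... drain the used ring (see the dequeue example further below)
 *	virtio_start_vq_intr(sc, vq);
 *	// ... check the used ring once more, or rely on the next interrupt
 */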

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = j + 1;
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_intrhand = virtio_vq_mq_intr;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
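
/*
 * Illustrative sketch only: allocating a single virtqueue from a child
 * driver's attach function.  "vsc" is the parent virtio softc, "sc->sc_vq"
 * is storage the child provides, and mydrv_vq_done is a hypothetical
 * completion callback; the segment count is just an example:
 *
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, MAXPHYS,
 *	    howmany(MAXPHYS, NBPG) + 2, "request") != 0)
 *		goto err;
 *	sc->sc_vq.vq_done = mydrv_vq_done;
 */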

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> instances of the following are pre-arranged in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			sc->sc_ops->kick(sc, vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete the dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
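
/*
 * A minimal sketch (not from this file) of a vq_done callback pairing
 * virtio_dequeue() with virtio_dequeue_commit(); mydrv_complete() and
 * dmamap_payload[] are assumptions standing in for driver state:
 *
 *	while (virtio_dequeue(sc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(virtio_dmat(sc), dmamap_payload[slot],
 *		    0, len, BUS_DMASYNC_POSTREAD);
 *		mydrv_complete(sc, slot, len);
 *		virtio_dequeue_commit(sc, vq, slot);	// recycle the slot
 *	}
 */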

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[256];
	int features;

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	features = virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");
}

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{
	if (nvq_pairs > 1)
		sc->sc_child_mq = true;

	sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	r = sc->sc_ops->setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}

		if (sc->sc_child_mq) {
			r = virtio_vq_softint_establish(sc);
			if (r != 0) {
				aprint_error_dev(sc->sc_dev,
				    "failed to establish softint interrupt\n");
				goto fail;
			}
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	virtio_vq_softint_disestablish(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint32_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_features;
}

int
virtiobusprint(void *aux, const char *pnp)
{
	struct virtio_attach_args * const va = aux;

	if (va->sc_childdevid == 0)
		return QUIET;	/* No device present */

	if (pnp)
		aprint_normal("Device ID %d at %s", va->sc_childdevid, pnp);

	return UNCONF;
}

MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}