/*	$NetBSD: virtio.c,v 1.38 2019/10/01 18:00:08 chs Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.38 2019/10/01 18:00:08 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

void
virtio_set_status(struct virtio_softc *sc, int status)
{
	sc->sc_ops->set_status(sc, status);
}

/*
 * Reset the device.
 *
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() can still be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, features can be renegotiated only after a virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		sc->sc_ops->setup_queue(sc, vq->vq_index,
		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = sc->sc_ops->neg_features(sc, guest_features);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
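
/*
 * Illustrative sketch only (VIRTIO_EXAMPLE_F_FOO is a made-up feature bit,
 * not a real definition): a child driver usually requests the feature bits
 * it can handle and then acts on what the host actually accepted:
 *
 *	features = virtio_negotiate_features(sc, VIRTIO_EXAMPLE_F_FOO);
 *	if (features & VIRTIO_EXAMPLE_F_FOO)
 *		<enable the optional FOO handling>;
 */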

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_1(sc, index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_2(sc, index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_4(sc, index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	return sc->sc_ops->read_dev_cfg_8(sc, index);
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	return sc->sc_ops->write_dev_cfg_1(sc, index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	return sc->sc_ops->write_dev_cfg_2(sc, index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	return sc->sc_ops->write_dev_cfg_4(sc, index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	return sc->sc_ops->write_dev_cfg_8(sc, index, value);
}

/*
 * Interrupt handler.
 */
static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	    + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	    + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

static void
virtio_vq_soft_intr(void *arg)
{
	struct virtqueue *vq = arg;

	KASSERT(vq->vq_intrhand != NULL);

	(vq->vq_intrhand)(vq);
}

static int
virtio_vq_softint_establish(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int qid;
	u_int flags;

	flags = SOFTINT_NET;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		flags |= SOFTINT_MPSAFE;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vq = &sc->sc_vqs[qid];
		vq->vq_soft_ih =
		    softint_establish(flags, virtio_vq_soft_intr, vq);
		if (vq->vq_soft_ih == NULL)
			return -1;
	}

	return 0;
}

static void
virtio_vq_softint_disestablish(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int qid;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vq = &sc->sc_vqs[qid];
		if (vq->vq_soft_ih == NULL)
			continue;

		softint_disestablish(vq->vq_soft_ih);
		vq->vq_soft_ih = NULL;
	}
}

/*
 * Can be used as sc_intrhand.
 *
 * Scan the virtqueue, bus_dmamap_sync for the rings (not for the payload),
 * and call (*vq_done)() if some entries have been consumed.
 */
static int
virtio_vq_intr_common(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int r = 0;

	if (vq->vq_queued) {
		vq->vq_queued = 0;
		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
	}
	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
	membar_consumer();
	if (vq->vq_used_idx != vq->vq_used->idx) {
		if (vq->vq_done)
			r |= (vq->vq_done)(vq);
	}

	return r;
}

int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		r |= virtio_vq_intr_common(vq);
	}

	return r;
}

static int
virtio_vq_mq_intr(struct virtqueue *vq)
{

	return virtio_vq_intr_common(vq);
}

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}
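
/*
 * Illustrative sketch only (an assumption about typical use, not taken from
 * a particular driver): since stopping interrupts is only a hint to the
 * host, a driver that polls with interrupts suppressed should re-check the
 * used ring after re-enabling them, to close the race with the host:
 *
 *	virtio_stop_vq_intr(sc, vq);
 *	while (virtio_dequeue(sc, vq, &slot, &len) == 0) {
 *		<handle the finished request in slot>;
 *		virtio_dequeue_commit(sc, vq, slot);
 *	}
 *	virtio_start_vq_intr(sc, vq);
 *	<check the used ring once more before relying on the interrupt>;
 */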

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = j + 1;
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	vq_size = sc->sc_ops->read_queue_size(sc, index);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue does not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	sc->sc_ops->setup_queue(sc, index,
	    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_intrhand = virtio_vq_mq_intr;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_SLEEP);
	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	sc->sc_ops->setup_queue(sc, index, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
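
/*
 * Illustrative sketch only ("rx", vioexample_rx_done and the softc layout
 * are made-up placeholders): a child driver typically allocates its
 * virtqueues during attach and installs a completion callback:
 *
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MCLBYTES, 2, "rx") != 0)
 *		goto err;
 *	sc->sc_vq[0].vq_done = vioexample_rx_done;
 *	virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
 */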

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	sc->sc_ops->setup_queue(sc, vq->vq_index, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> instances of the following are kept in per-slot arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);	// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			sc->sc_ops->kick(sc, vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
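
/*
 * Illustrative sketch of a (*vq_done)() handler (the vioexample_* name and
 * the dmamap_payload array are made-up placeholders, following the
 * "Typical usage" comment above): drain every slot the host has returned,
 * sync the payload, then recycle the slot:
 *
 *	static int
 *	vioexample_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		int slot, len, more = 0;
 *
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			bus_dmamap_sync(virtio_dmat(vsc),
 *			    dmamap_payload[slot], 0, len,
 *			    BUS_DMASYNC_POSTREAD);
 *			<hand the completed request to the upper layer>;
 *			virtio_dequeue_commit(vsc, vq, slot);
 *			more = 1;
 *		}
 *		return more;
 *	}
 */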

/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[256];
	int features;

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	features = virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");
}
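
/*
 * Illustrative attach-time flow for a child driver (a sketch under
 * assumptions: VIRTIO_EXAMPLE_F_FOO, VIRTIO_EXAMPLE_FLAG_BITS and
 * vioexample_vq_done are placeholders for the child's feature bit,
 * snprintb(9) bit-description string and completion callback):
 *
 *	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
 *	    NULL, virtio_vq_intr, 0,
 *	    VIRTIO_EXAMPLE_F_FOO, VIRTIO_EXAMPLE_FLAG_BITS);
 *	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MCLBYTES, 2, "req") != 0)
 *		goto err;
 *	sc->sc_vq[0].vq_done = vioexample_vq_done;
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		goto err;
 */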

void
virtio_child_attach_set_vqs(struct virtio_softc *sc,
    struct virtqueue *vqs, int nvq_pairs)
{
	if (nvq_pairs > 1)
		sc->sc_child_mq = true;

	sc->sc_vqs = vqs;
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	r = sc->sc_ops->setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		goto fail;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			sc->sc_ops->free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			goto fail;
		}

		if (sc->sc_child_mq) {
			r = virtio_vq_softint_establish(sc);
			if (r != 0) {
				aprint_error_dev(sc->sc_dev,
				    "failed to establish softint interrupt\n");
				goto fail;
			}
		}
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	return 0;

fail:
	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	virtio_vq_softint_disestablish(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	return 1;
}

void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	sc->sc_ops->free_interrupts(sc);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint32_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_features;
}

int
virtiobusprint(void *aux, const char *pnp)
{
	struct virtio_attach_args * const va = aux;

	if (va->sc_childdevid == 0)
		return QUIET;	/* No device present */

	if (pnp)
		aprint_normal("Device ID %d at %s", va->sc_childdevid, pnp);

	return UNCONF;
}

MODULE(MODULE_CLASS_DRIVER, virtio, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}