/*	$NetBSD: virtio.c,v 1.28 2017/06/01 02:45:11 chs Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.28 2017/06/01 02:45:11 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/module.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define VIRTIO_PRIVATE

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static int	virtio_match(device_t, cfdata_t, void *);
static void	virtio_attach(device_t, device_t, void *);
static int	virtio_rescan(device_t, const char *, const int *);
static int	virtio_detach(device_t, int);
static int	virtio_intr(void *arg);
static int	virtio_msix_queue_intr(void *);
static int	virtio_msix_config_intr(void *);
static int	virtio_setup_msix_vectors(struct virtio_softc *);
static int	virtio_setup_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_interrupts(struct virtio_softc *);
static void	virtio_free_interrupts(struct virtio_softc *);
static void	virtio_soft_intr(void *arg);
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

CFATTACH_DECL3_NEW(virtio, sizeof(struct virtio_softc),
    virtio_match, virtio_attach, virtio_detach, NULL, virtio_rescan, NULL,
    DVF_DETACH_SHUTDOWN);
/* We use the legacy virtio spec, so the PCI registers are in host-native
 * byte order, not PCI (i.e. LE) byte order. */
static inline uint16_t
nbo_bus_space_read_2(bus_space_tag_t space, bus_space_handle_t handle,
    bus_size_t offset)
{
	return le16toh(bus_space_read_2(space, handle, offset));
}

static inline uint32_t
nbo_bus_space_read_4(bus_space_tag_t space, bus_space_handle_t handle,
    bus_size_t offset)
{
	return le32toh(bus_space_read_4(space, handle, offset));
}

static void
nbo_bus_space_write_2(bus_space_tag_t space, bus_space_handle_t handle,
    bus_size_t offset, uint16_t value)
{
	bus_space_write_2(space, handle, offset, htole16(value));
}

static void
nbo_bus_space_write_4(bus_space_tag_t space, bus_space_handle_t handle,
    bus_size_t offset, uint32_t value)
{
	bus_space_write_4(space, handle, offset, htole32(value));
}

/* some functions access registers at 4 byte offset for low/high halves */
#if BYTE_ORDER == BIG_ENDIAN
#define REG_HI_OFF	0
#define REG_LO_OFF	4
#else
#define REG_HI_OFF	4
#define REG_LO_OFF	0
#endif

static void
virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
	    status|old);
}

#define virtio_device_reset(sc)	virtio_set_status((sc), 0)

static int
virtio_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		if ((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		     PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		     PCI_PRODUCT_QUMRANET_VIRTIO_103F))
			return 1;
		break;
	}

	return 0;
}

static const char *virtio_device_name[] = {
	"Unknown (0)",			/* 0 */
	"Network",			/* 1 */
	"Block",			/* 2 */
	"Console",			/* 3 */
	"Entropy",			/* 4 */
	"Memory Balloon",		/* 5 */
	"I/O Memory",			/* 6 */
	"Remote Processor Messaging",	/* 7 */
	"SCSI",				/* 8 */
	"9P Transport",			/* 9 */
	"mac80211 wlan",		/* 10 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

static int
virtio_setup_msix_vectors(struct virtio_softc *sc)
{
	int offset, vector, ret, qid;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
	ret = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector)
		return -1;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
		ret = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector)
			return -1;
	}

	return 0;
}

static int
virtio_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;
	int idx;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx],
	    sc->sc_ipl, virtio_msix_config_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx],
	    sc->sc_ipl, virtio_msix_queue_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
		goto error;
	}

	if (virtio_setup_msix_vectors(sc) != 0) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);

	return -1;
}

static int
virtio_setup_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_ihp[0],
	    sc->sc_ipl, virtio_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, sc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_setup_interrupts(struct virtio_softc *sc)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
	int error;
	int nmsix;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;

	nmsix = pci_msix_count(sc->sc_pa.pa_pc, sc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_PCI_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = 2;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&sc->sc_pa, &sc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 2,
		    KM_SLEEP);

		error = virtio_setup_msix_interrupts(sc, &sc->sc_pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 2);
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		sc->sc_ihs_num = 2;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	} else if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_setup_intx_interrupt(sc, &sc->sc_pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 1);
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		sc->sc_ihs_num = 1;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	}

	KASSERT(sc->sc_soft_ih == NULL);
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL) {
			virtio_free_interrupts(sc);
			aprint_error_dev(sc->sc_dev,
			    "failed to establish soft interrupt\n");
			return -1;
		}
	}

	return 0;
}

static void
virtio_free_interrupts(struct virtio_softc *sc)
{
	for (int i = 0; i < sc->sc_ihs_num; i++) {
		if (sc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
		sc->sc_ihs[i] = NULL;
	}

	if (sc->sc_ihs_num > 0)
		pci_intr_release(sc->sc_pc, sc->sc_ihp, sc->sc_ihs_num);

	if (sc->sc_soft_ih) {
		softint_disestablish(sc->sc_soft_ih);
		sc->sc_soft_ih = NULL;
	}

	if (sc->sc_ihs != NULL) {
		kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * sc->sc_ihs_num);
		sc->sc_ihs = NULL;
	}
	sc->sc_ihs_num = 0;
}

static void
virtio_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	/* subsystem ID shows what I am */
	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
	aprint_normal_dev(self, "Virtio %s Device (rev. 0x%02x)\n",
	    (PCI_SUBSYS_ID(id) < NDEVNAMES ?
	     virtio_device_name[PCI_SUBSYS_ID(id)] : "Unknown"),
	    revision);

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc_iot = pa->pa_iot;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = PCI_SUBSYS_ID(id);
	sc->sc_child = NULL;
	sc->sc_pa = *pa;
	virtio_rescan(self, "virtio", 0);
	return;
}

/* ARGSUSED */
static int
virtio_rescan(device_t self, const char *attr, const int *scan_flags)
{
	struct virtio_softc *sc;
	struct virtio_attach_args va;

	sc = device_private(self);
	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found_ia(self, attr, &va, NULL);

	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
		    "no matching child driver; not configured\n");
		return 0;
	}

	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
		aprint_error_dev(self,
		    "virtio configuration failed\n");
		return 0;
	}

	/*
	 * Make sure child drivers initialize interrupts via call
	 * to virtio_child_attach_finish().
	 */
	KASSERT(sc->sc_ihs_num != 0);

	return 0;
}

static int
virtio_detach(device_t self, int flags)
{
	struct virtio_softc *sc = device_private(self);
	int r;

	if (sc->sc_child != NULL) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}

	/* Check that child detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(sc->sc_ihs_num == 0);

	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation can only be allowed after virtio_reset.
 */
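/*
 * A minimal sketch of that sequence as a child driver's reinit path
 * (foo_softc, sc_virtio, sc_features and foo_drain are illustrative
 * names, not part of this API):
 *	void
 *	foo_reinit(struct foo_softc *fsc)
 *	{
 *		struct virtio_softc *vsc = fsc->sc_virtio;
 *
 *		virtio_reset(vsc);		// stop device activity
 *		foo_drain(fsc);			// dequeue/revoke requests
 *		virtio_reinit_start(vsc);	// reinit vqs and vectors
 *		virtio_negotiate_features(vsc, fsc->sc_features);
 *		virtio_reinit_end(vsc);		// DRIVER_OK; enqueue allowed
 *	}
 */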
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SELECT,
		    vq->vq_index);
		n = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			    device_xname(sc->sc_dev),
			    vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    VIRTIO_CONFIG_QUEUE_ADDRESS,
		    (vq->vq_dmamap->dm_segs[0].ds_addr
		     / VIRTIO_PAGE_SIZE));
	}

	/* MSI-X should have more than one handle, whereas INTx has just one */
	if (sc->sc_ihs_num > 1) {
		if (virtio_setup_msix_vectors(sc) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't setup MSI-X vectors\n");
			return;
		}
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}

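/*
 * For example, a child driver could negotiate like this (the FOO_F_*
 * feature bits are illustrative, not real definitions):
 *	uint32_t features;
 *	features = virtio_negotiate_features(sc, FOO_F_BAR | FOO_F_BAZ);
 *	if (features & FOO_F_BAR)
 *		...;	// the host accepted FOO_F_BAR
 * In practice, virtio_child_attach_start() below performs this call on
 * behalf of child drivers.
 */
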
/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + REG_HI_OFF);
	r <<= 32;
	r |= nbo_bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + REG_LO_OFF);

	return r;
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
    int index, uint8_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
    int index, uint16_t value)
{
	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
    int index, uint32_t value)
{
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
    int index, uint64_t value)
{
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + REG_LO_OFF,
	    value & 0xffffffff);
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    sc->sc_config_offset + index + REG_HI_OFF,
	    value >> 32);
}

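/*
 * Example use of the accessors above: reading a 64-bit config field at
 * offset 0 (where, e.g., virtio-blk keeps its capacity); the 8-byte
 * accessors combine two 4-byte accesses using REG_HI_OFF/REG_LO_OFF:
 *	uint64_t capacity = virtio_read_device_config_8(sc, 0);
 */
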
/*
 * Interrupt handler.
 */
static int
virtio_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
	    ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_availoffset,
	    offsetof(struct vring_avail, ring)
	     + vq->vq_num * sizeof(uint16_t),
	    ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_usedoffset,
	    offsetof(struct vring_used, ring)
	     + vq->vq_num * sizeof(struct vring_used_elem),
	    ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    int ops)
{
	int offset = vq->vq_indirectoffset
	    + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
	    offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
	    ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scans the vqs, performs bus_dmamap_sync for the vq rings (not for the
 * payload), and calls (*vq_done)() if some entries have been consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

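/*
 * A child that wants per-vq callbacks can pass virtio_vq_intr as its
 * interrupt handler and set vq_done on each queue; a sketch, where
 * foo_vq_done and the surrounding names are illustrative:
 *	sc->sc_vq[0].vq_done = foo_vq_done;
 *	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
 *	    NULL, virtio_vq_intr, 0, req_features, FOO_FLAG_BITS);
 */
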
/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

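/*
 * The usual pattern in a vq_done callback is to suppress further
 * interrupts while draining the used ring, then re-enable them; a
 * sketch (the processing step is driver-specific):
 *	virtio_stop_vq_intr(sc, vq);
 *	while (virtio_dequeue(sc, vq, &slot, &len) == 0) {
 *		...;	// process the finished request
 *		virtio_dequeue_commit(sc, vq, slot);
 *	}
 *	virtio_start_vq_intr(sc, vq);
 * Since there is no guarantee, a final re-check after re-enabling is
 * needed to avoid losing a race with the host.
 */
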
/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
				vd[j].next = j + 1;
			}
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
		    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	/* Make sure callers allocate vqs in order */
	KASSERT(sc->sc_nvqs == index);

	memset(vq, 0, sizeof(*vq));

	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, index);
	vq_size = nbo_bus_space_read_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
891 "virtqueue not exist, index %d for %s\n",
		    index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
	    + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
	    + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

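	/*
	 * A worked example of the sizing above, assuming vq_size = 8,
	 * maxnsegs = 4 and VIRTIO_PAGE_SIZE = 4096: the descriptor table
	 * is 16*8 = 128 bytes and the avail ring 2*(2+8) = 20 bytes, so
	 * allocsize1 rounds up to 4096; the used ring is 4 + 8*8 = 68
	 * bytes, so allocsize2 is another 4096; the indirect tables add
	 * 16*4*8 = 512 bytes, for 8704 bytes in total.
	 */
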
	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
	    &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s allocation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
	    &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s map failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
	    BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap creation failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
	    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
		    "virtqueue %d for %s dmamap load failed, "
		    "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS,
	    (vq->vq_dmamap->dm_segs[0].ds_addr
	     / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
		    + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
	    KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %u bytes for virtqueue %d for %s, "
	    "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
		    "using %d bytes (%d entries) of "
		    "indirect descriptors\n",
		    allocsize3, maxnsegs * vq_size);

	sc->sc_nvqs++;

	return 0;

err:
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		    device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
	nbo_bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	sc->sc_nvqs--;

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  <queue size> instances of the following are stored in arrays:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *	r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *		return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *		virtio_enqueue_abort(sc, vq, slot);
 *		return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *	    dmamap_payload[slot]->dm_nsegs+1);
 *						// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		return r;	// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
    int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
		    + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
		    * vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
    bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
	    (dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
    bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    VIRTIO_CONFIG_QUEUE_NOTIFY,
			    vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
    int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 * If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}

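/*
 * A typical completion path, continuing the enqueue example above
 * (dmamap_payload and dmat are the illustrative names from that
 * comment):
 *	while (virtio_dequeue(sc, vq, &slot, &len) == 0) {
 *		bus_dmamap_sync(dmat, dmamap_payload[slot], ...,
 *		    BUS_DMASYNC_POSTREAD);
 *		bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *		...;	// inspect the command block for status
 *		virtio_dequeue_commit(sc, vq, slot);
 *	}
 */
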
/*
 * Attach a child, fill all the members.
 */
void
virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl,
    struct virtqueue *vqs,
    virtio_callback config_change,
    virtio_callback intr_hand,
    int req_flags, int req_features, const char *feat_bits)
{
	char buf[256];
	int features;

	sc->sc_child = child;
	sc->sc_ipl = ipl;
	sc->sc_vqs = vqs;
	sc->sc_config_change = config_change;
	sc->sc_intrhand = intr_hand;
	sc->sc_flags = req_flags;

	features = virtio_negotiate_features(sc, req_features);
	snprintb(buf, sizeof(buf), feat_bits, features);
	aprint_normal(": Features: %s\n", buf);
	aprint_naive("\n");
}

int
virtio_child_attach_finish(struct virtio_softc *sc)
{
	int r;

	r = virtio_setup_interrupts(sc);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return 1;
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return 0;
}

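/*
 * Putting the attach pieces together, a child's attach function would
 * look roughly like the following sketch (names and sizes are
 * illustrative; error handling omitted):
 *	virtio_child_attach_start(vsc, self, IPL_BIO, sc->sc_vq,
 *	    foo_config_change, virtio_vq_intr, 0,
 *	    FOO_FEATURES, FOO_FLAG_BITS);
 *	virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS,
 *	    MAXPHYS/NBPG + 2, "foo request");
 *	sc->sc_vq[0].vq_done = foo_vq_done;
 *	if (virtio_child_attach_finish(vsc) != 0)
 *		virtio_child_attach_failed(vsc);
 */
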
void
virtio_child_detach(struct virtio_softc *sc)
{
	sc->sc_child = NULL;
	sc->sc_vqs = NULL;

	virtio_device_reset(sc);

	virtio_free_interrupts(sc);
}

void
virtio_child_attach_failed(struct virtio_softc *sc)
{
	virtio_child_detach(sc);

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);

	sc->sc_child = VIRTIO_CHILD_FAILED;
}

bus_dma_tag_t
virtio_dmat(struct virtio_softc *sc)
{
	return sc->sc_dmat;
}

device_t
virtio_child(struct virtio_softc *sc)
{
	return sc->sc_child;
}

int
virtio_intrhand(struct virtio_softc *sc)
{
	return (sc->sc_intrhand)(sc);
}

uint32_t
virtio_features(struct virtio_softc *sc)
{
	return sc->sc_features;
}

MODULE(MODULE_CLASS_DRIVER, virtio, "pci");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio,
		    cfattach_ioconf_virtio, cfdata_ioconf_virtio);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}