/*	$NetBSD: virtio.c,v 1.16 2016/07/11 06:14:51 knakahara Exp $	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.16 2016/07/11 06:14:51 knakahara Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kmem.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

#define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */

static int	virtio_match(device_t, cfdata_t, void *);
static void	virtio_attach(device_t, device_t, void *);
static int	virtio_detach(device_t, int);
static int	virtio_intr(void *arg);
static int	virtio_msix_queue_intr(void *);
static int	virtio_msix_config_intr(void *);
static int	virtio_setup_msix_vectors(struct virtio_softc *);
static int	virtio_setup_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_setup_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static void	virtio_soft_intr(void *arg);
static void	virtio_init_vq(struct virtio_softc *,
		    struct virtqueue *, const bool);

CFATTACH_DECL3_NEW(virtio, sizeof(struct virtio_softc),
    virtio_match, virtio_attach, virtio_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static void
virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
				       VIRTIO_CONFIG_DEVICE_STATUS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
			  status|old);
}

#define virtio_device_reset(sc)	virtio_set_status((sc), 0)

static int
virtio_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		if ((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		     PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		     PCI_PRODUCT_QUMRANET_VIRTIO_103F))
			return 1;
		break;
	}

	return 0;
}

static const char *virtio_device_name[] = {
	"Unknown (0)",			/* 0 */
	"Network",			/* 1 */
	"Block",			/* 2 */
	"Console",			/* 3 */
	"Entropy",			/* 4 */
	"Memory Balloon",		/* 5 */
	"I/O Memory",			/* 6 */
	"Remote Processor Messaging",	/* 7 */
	"SCSI",				/* 8 */
	"9P Transport",			/* 9 */
	"mac80211 wlan",		/* 10 */
};
#define NDEVNAMES	__arraycount(virtio_device_name)

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

static int
virtio_setup_msix_vectors(struct virtio_softc *sc)
{
	int offset, vector, ret, qid;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
	ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector)
		return -1;

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, vector);
		ret = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector)
			return -1;
	}

	return 0;
}

static int
virtio_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;
	int idx;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx], IPL_NET,
	    virtio_msix_config_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[idx] = pci_intr_establish_xname(pc, sc->sc_ihp[idx], IPL_NET,
	    virtio_msix_queue_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
		goto error;
	}

	if (virtio_setup_msix_vectors(sc) != 0) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, sc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[idx]);

	return -1;
}

static int
virtio_setup_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
		pci_intr_setattr(pc, &sc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_ihp[0],
	    IPL_NET, virtio_intr, sc, device_xname(sc->sc_dev));
	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, sc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_setup_interrupts(struct virtio_softc *sc, struct pci_attach_args *pa)
{
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	int error;
	int nmsix;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;

	nmsix = pci_msix_count(pa->pa_pc, pa->pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_PCI_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = 2;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

 retry:
	error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 2,
		    KM_SLEEP);
		if (sc->sc_ihs == NULL) {
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		error = virtio_setup_msix_interrupts(sc, pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 2);
			pci_intr_release(pc, sc->sc_ihp, 2);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		sc->sc_ihs_num = 2;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	} else if (pci_intr_type(pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		sc->sc_ihs = kmem_alloc(sizeof(*sc->sc_ihs) * 1,
		    KM_SLEEP);
		if (sc->sc_ihs == NULL) {
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		error = virtio_setup_intx_interrupt(sc, pa);
		if (error != 0) {
			kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * 1);
			pci_intr_release(pc, sc->sc_ihp, 1);
			return -1;
		}

		sc->sc_ihs_num = 1;
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	}

	return 0;
}

static void
virtio_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	pcireg_t id;
	int r;

	revision = PCI_REVISION(pa->pa_class);
	if (revision != 0) {
		aprint_normal(": unknown revision 0x%02x; giving up\n",
			      revision);
		return;
	}
	aprint_normal("\n");
	aprint_naive("\n");

	/* the subsystem ID identifies the virtio device type */
	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
	aprint_normal_dev(self, "Virtio %s Device (rev. 0x%02x)\n",
			  (PCI_SUBSYS_ID(id) < NDEVNAMES?
			   virtio_device_name[PCI_SUBSYS_ID(id)] : "Unknown"),
			  revision);

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc_iot = pa->pa_iot;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
			   &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	/* XXX: use softc as aux... */
	sc->sc_childdevid = PCI_SUBSYS_ID(id);
	sc->sc_child = NULL;
	config_found(self, sc, NULL);
	if (sc->sc_child == NULL) {
		aprint_error_dev(self,
				 "no matching child driver; not configured\n");
		return;
	}
	if (sc->sc_child == (void*)1) { /* (void *)1 indicates child attach failure */
		aprint_error_dev(self,
				 "virtio configuration failed\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	r = virtio_setup_interrupts(sc, pa);
	if (r != 0) {
		aprint_error_dev(self, "failed to setup interrupts\n");
		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_soft_ih = NULL;
	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
		u_int flags = SOFTINT_NET;
		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
			flags |= SOFTINT_MPSAFE;

		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
		if (sc->sc_soft_ih == NULL)
			aprint_error(": failed to establish soft interrupt\n");
	}

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

	return;
}

static int
virtio_detach(device_t self, int flags)
{
	struct virtio_softc *sc = device_private(self);
	int r;
	int i;

	if (sc->sc_child != 0 && sc->sc_child != (void*)1) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}
	KASSERT(sc->sc_child == 0 || sc->sc_child == (void*)1);
	KASSERT(sc->sc_vqs == 0);
	for (i = 0; i < sc->sc_ihs_num; i++) {
		if (sc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
	}
	pci_intr_release(sc->sc_pc, sc->sc_ihp, sc->sc_ihs_num);
	kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * sc->sc_ihs_num);
	sc->sc_ihs_num = 0;
	if (sc->sc_iosize)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
	sc->sc_iosize = 0;

	return 0;
}

/*
 * Reset the device.
 */
/*
 * To reset the device to a known state, do the following:
 *	virtio_reset(sc);	     // this will stop the device activity
 *	<dequeue finished requests>; // virtio_dequeue() can still be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_start(sc);     // dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	     // device activated; enqueue allowed
 * Once attached, feature negotiation is only allowed after a virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}

void
virtio_reinit_start(struct virtio_softc *sc)
{
	int i;

	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
	for (i = 0; i < sc->sc_nvqs; i++) {
		int n;
		struct virtqueue *vq = &sc->sc_vqs[i];
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
				  VIRTIO_CONFIG_QUEUE_SELECT,
				  vq->vq_index);
		n = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				     VIRTIO_CONFIG_QUEUE_SIZE);
		if (n == 0)	/* vq disappeared */
			continue;
		if (n != vq->vq_num) {
			panic("%s: virtqueue size changed, vq index %d\n",
			      device_xname(sc->sc_dev),
			      vq->vq_index);
		}
		virtio_init_vq(sc, vq, true);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  VIRTIO_CONFIG_QUEUE_ADDRESS,
				  (vq->vq_dmamap->dm_segs[0].ds_addr
				   / VIRTIO_PAGE_SIZE));
	}

	/* MSI-X uses more than one handle, whereas INTx has just one */
	if (sc->sc_ihs_num > 1) {
		if (virtio_setup_msix_vectors(sc) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't setup MSI-X vectors\n");
			return;
		}
	}
}

void
virtio_reinit_end(struct virtio_softc *sc)
{
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
}
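
/*
 * Example: the reset/reinit sequence described above, as it might look
 * in a child driver.  This is a minimal sketch only; the "foo" softc,
 * its sc_virtio member, and fsc->sc_features are hypothetical names,
 * not part of this API.
 *
 *	struct foo_softc *fsc = ...;
 *	struct virtio_softc *vsc = fsc->sc_virtio;
 *
 *	virtio_reset(vsc);		// stop device activity
 *	// dequeue finished requests; virtio_dequeue() is still usable
 *	// revoke pending requests in the vqs, if any
 *	virtio_reinit_start(vsc);	// dequeue now prohibited
 *	(void)virtio_negotiate_features(vsc, fsc->sc_features);
 *	// redo device-specific setup here
 *	virtio_reinit_end(vsc);		// device activated; enqueue allowed
 */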

/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t r;

	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			     VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC)
		sc->sc_indirect = true;
	else
		sc->sc_indirect = false;

	return r;
}
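
/*
 * Example: requesting features from a child driver and acting on the
 * negotiated subset.  A hedged sketch: VIRTIO_NET_F_MAC and
 * VIRTIO_NET_F_STATUS are standard virtio-net feature bits used only
 * to illustrate the pattern; sc_negotiated is a hypothetical child
 * softc member.
 *
 *	uint32_t features;
 *
 *	features = virtio_negotiate_features(vsc,
 *	    VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS);
 *	if (features & VIRTIO_NET_F_MAC) {
 *		// the host supplied a MAC address in config space
 *	}
 *	sc->sc_negotiated = features;
 */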

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			     sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			      sc->sc_config_offset + index);
	return r;
}

void
virtio_write_device_config_1(struct virtio_softc *sc,
			     int index, uint8_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc,
			     int index, uint16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc,
			     int index, uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index, value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc,
			     int index, uint64_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index,
			  value & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  sc->sc_config_offset + index + sizeof(uint32_t),
			  value >> 32);
}
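
/*
 * Example: reading a device-specific configuration field.  Offsets are
 * relative to the device-specific configuration area; sc_config_offset
 * (which differs between the INTx and MSI-X layouts) is applied
 * internally.  A sketch only: VIRTIO_NET_CONFIG_MAC is the virtio-net
 * MAC offset and appears here purely for illustration.
 *
 *	uint8_t mac[6];
 *	int i;
 *
 *	for (i = 0; i < 6; i++)
 *		mac[i] = virtio_read_device_config_1(vsc,
 *		    VIRTIO_NET_CONFIG_MAC + i);
 */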

/*
 * Interrupt handler.
 */
static int
virtio_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			       VIRTIO_CONFIG_ISR_STATUS);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	/* TODO: handle events */
	aprint_debug_dev(sc->sc_dev, "%s\n", __func__);
	return 1;
}

static void
virtio_soft_intr(void *arg)
{
	struct virtio_softc *sc = arg;

	KASSERT(sc->sc_intrhand != NULL);

	(sc->sc_intrhand)(sc);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	/* availoffset == sizeof(vring_desc)*vq_num */
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap, 0, vq->vq_availoffset,
			ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
			vq->vq_availoffset,
			offsetof(struct vring_avail, ring)
			 + vq->vq_num * sizeof(uint16_t),
			ops);
}

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
			vq->vq_usedoffset,
			offsetof(struct vring_used, ring)
			 + vq->vq_num * sizeof(struct vring_used_elem),
			ops);
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		     int ops)
{
	int offset = vq->vq_indirectoffset
		      + sizeof(struct vring_desc) * vq->vq_maxnsegs * slot;

	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
			offset, sizeof(struct vring_desc) * vq->vq_maxnsegs,
			ops);
}

/*
 * Can be used as sc_intrhand.
 */
/*
 * Scans the vqs, does bus_dmamap_sync for the rings (not for the payload),
 * and calls (*vq_done)() if some entries have been consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}
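
/*
 * Example: a child driver that needs no extra processing in its
 * interrupt path typically points sc_intrhand at virtio_vq_intr and
 * supplies a per-queue completion callback via vq_done.  foo_vq_done
 * is a hypothetical handler; see the sketch after
 * virtio_dequeue_commit below.
 *
 *	vsc->sc_intrhand = virtio_vq_intr;
 *	...
 *	fsc->sc_vq.vq_done = foo_vq_done;
 */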

/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

void
virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq->vq_queued++;
}

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq,
    const bool reinit)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);

	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;

		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++)
				vd[j].next = j + 1;
		}
	}

	/* free slot management */
	SIMPLEQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		SIMPLEQ_INSERT_TAIL(&vq->vq_freelist,
				    &vq->vq_entries[i], qe_list);
		vq->vq_entries[i].qe_index = i;
	}
	if (!reinit)
		mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	if (!reinit) {
		mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl);
		mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl);
	}
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

/*
 * Allocate/free a vq.
 */
int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
    int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int rsegs, r;
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
				 ~(VIRTIO_PAGE_SIZE-1))

	memset(vq, 0, sizeof(*vq));

	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_SELECT, index);
	vq_size = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				   VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue does not exist, index %d for %s\n",
				 index, name);
		goto err;
	}
	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
				     + sizeof(uint16_t)*(2+vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
				     + sizeof(struct vring_used_elem)*vq_size);
	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;
	allocsize = allocsize1 + allocsize2 + allocsize3;

	/* alloc and map the memory */
	r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0,
			     &vq->vq_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s allocation failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
			   &vq->vq_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s map failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0,
			      BUS_DMA_NOWAIT, &vq->vq_dmamap);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s dmamap creation failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}
	r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap,
			    vq->vq_vaddr, allocsize, NULL, BUS_DMA_NOWAIT);
	if (r != 0) {
		aprint_error_dev(sc->sc_dev,
				 "virtqueue %d for %s dmamap load failed, "
				 "error code %d\n", index, name, r);
		goto err;
	}

	/* set the vq address */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_ADDRESS,
			  (vq->vq_dmamap->dm_segs[0].ds_addr
			   / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2;
		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
					  + vq->vq_indirectoffset);
	}
	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry)*vq_size,
				     KM_NOSLEEP);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq, false);

	aprint_verbose_dev(sc->sc_dev,
			   "allocated %u bytes for virtqueue %d for %s, "
			   "size %d\n", allocsize, index, name, vq_size);
	if (allocsize3 > 0)
		aprint_verbose_dev(sc->sc_dev,
				   "using %d bytes (%d entries) of "
				   "indirect descriptors\n",
				   allocsize3, maxnsegs * vq_size);
	return 0;

err:
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_dmamap)
		bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	if (vq->vq_vaddr)
		bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, allocsize);
	if (vq->vq_segs[0].ds_addr)
		bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	memset(vq, 0, sizeof(*vq));

	return -1;
}
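
/*
 * Example: allocating virtqueue 0 from a child driver's attach.  A
 * sketch under assumed parameters: segments of up to MAXPHYS bytes and
 * at most two descriptors per request; "foo request", fsc, and
 * foo_vq_done are hypothetical.
 *
 *	if (virtio_alloc_vq(vsc, &fsc->sc_vq, 0, MAXPHYS, 2,
 *	    "foo request") != 0)
 *		return;			// attach fails; vq was zeroed
 *	fsc->sc_vq.vq_done = foo_vq_done;
 */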

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/* device must be already deactivated */
	/* confirm the vq is empty */
	SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}
	if (i != vq->vq_num) {
		printf("%s: freeing non-empty vq, index %d\n",
		       device_xname(sc->sc_dev), vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_SELECT, vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  VIRTIO_CONFIG_QUEUE_ADDRESS, 0);

	kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num);
	bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize);
	bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1);
	mutex_destroy(&vq->vq_freelist_lock);
	mutex_destroy(&vq->vq_uring_lock);
	mutex_destroy(&vq->vq_aring_lock);
	memset(vq, 0, sizeof(*vq));

	return 0;
}

/*
 * Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (SIMPLEQ_EMPTY(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return NULL;
	}
	qe = SIMPLEQ_FIRST(&vq->vq_freelist);
	SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return qe;
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);
	SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	mutex_exit(&vq->vq_freelist_lock);

	return;
}

/*
 * Enqueue several dmamaps as a single request.
 */
/*
 * Typical usage:
 *  Arrays of <queue size> entries of the following should be prepared:
 *  - command blocks (in dmamem) should be pre-allocated and mapped
 *  - dmamaps for command blocks should be pre-allocated and loaded
 *  - dmamaps for payload should be pre-allocated
 *      r = virtio_enqueue_prep(sc, vq, &slot);		// allocate a slot
 *	if (r)		// currently 0 or EAGAIN
 *	  return r;
 *	r = bus_dmamap_load(dmat, dmamap_payload[slot], data, count, ..);
 *	if (r) {
 *	  virtio_enqueue_abort(sc, vq, slot);
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;
 *	}
 *	r = virtio_enqueue_reserve(sc, vq, slot,
 *				   dmamap_payload[slot]->dm_nsegs+1);
 *							// ^ +1 for command
 *	if (r) {	// currently 0 or EAGAIN
 *	  bus_dmamap_unload(dmat, dmamap_payload[slot]);
 *	  return r;					// do not call abort()
 *	}
 *	<setup and prepare commands>
 *	bus_dmamap_sync(dmat, dmamap_cmd[slot],... BUS_DMASYNC_PREWRITE);
 *	bus_dmamap_sync(dmat, dmamap_payload[slot],...);
 *	virtio_enqueue(sc, vq, slot, dmamap_cmd[slot], false);
 *	virtio_enqueue(sc, vq, slot, dmamap_payload[slot], iswrite);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 */

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq,
		       int slot, int nsegs)
{
	int indirect;
	struct vq_entry *qe1 = &vq->vq_entries[slot];

	KASSERT(qe1->qe_next == -1);
	KASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {
		struct vring_desc *vd;
		int i;

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
			+ vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc)
			* vq->vq_maxnsegs * qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;

		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		struct vring_desc *vd;
		struct vq_entry *qe;
		int i, s;

		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				virtio_enqueue_abort(sc, vq, slot);
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
	       bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs > 0);

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		vd[s].addr = dmamap->dm_segs[i].ds_addr;
		vd[s].len = dmamap->dm_segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		s = vd[s].next;
	}
	qe1->qe_next = s;

	return 0;
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		 bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
		 bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KASSERT(s >= 0);
	KASSERT(dmamap->dm_nsegs == 1); /* XXX */
	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
		(dmamap->dm_segs[0].ds_len >= start + len));

	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		      bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		mutex_enter(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);
	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
	mutex_enter(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
		membar_producer();
		vq->vq_avail->idx = vq->vq_avail_idx;
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		membar_producer();
		vq->vq_queued++;
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		membar_consumer();
		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
					  VIRTIO_CONFIG_QUEUE_NOTIFY,
					  vq->vq_index);
	}
	mutex_exit(&vq->vq_aring_lock);

	return 0;
}
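
/*
 * Example: batching commits.  Passing notifynow == false just appends
 * the slot to the avail ring; a final call with slot == -1 publishes
 * the updated index and notifies the device once.  A sketch only;
 * whether batching helps depends on the workload.
 *
 *	for (each prepared slot)
 *		virtio_enqueue_commit(sc, vq, slot, false);
 *	virtio_enqueue_commit(sc, vq, -1, true);	// single notify
 */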

/*
 * enqueue_abort: rollback.
 */
int
virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd;
	int s;

	if (qe->qe_next < 0) {
		vq_free_entry(vq, qe);
		return 0;
	}

	s = slot;
	vd = &vq->vq_desc[0];
	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);
	return 0;
}

/*
 * Dequeue a request.
 */
/*
 * dequeue: dequeue a request from uring; dmamap_sync for uring is
 *	    already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq,
	       int *slotp, int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	mutex_enter(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	mutex_exit(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use.
 *                 If you forget to call this, the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
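
/*
 * Example: a typical vq_done handler drains the used ring and recycles
 * the slots.  A minimal sketch; foo_vq_done and the payload dmamap
 * handling are hypothetical and depend on the child driver.
 *
 *	static int
 *	foo_vq_done(struct virtqueue *vq)
 *	{
 *		struct virtio_softc *vsc = vq->vq_owner;
 *		int slot, len, r = 0;
 *
 *		while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 *			// sync and unload the per-slot payload dmamaps,
 *			// then hand the result to the upper layer
 *			virtio_dequeue_commit(vsc, vq, slot);
 *			r = 1;
 *		}
 *		return r;
 *	}
 */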