      1 /* $NetBSD: virtio_pci.c,v 1.31 2021/10/21 05:32:27 yamaguchi Exp $ */
      2 
      3 /*
      4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
      5  * Copyright (c) 2012 Stefan Fritsch.
      6  * Copyright (c) 2010 Minoura Makoto.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.31 2021/10/21 05:32:27 yamaguchi Exp $");
     32 
     33 #include <sys/param.h>
     34 #include <sys/systm.h>
     35 #include <sys/kmem.h>
     36 #include <sys/module.h>
     37 #include <sys/endian.h>
     38 #include <sys/interrupt.h>
     39 
     40 #include <sys/device.h>
     41 
     42 #include <dev/pci/pcidevs.h>
     43 #include <dev/pci/pcireg.h>
     44 #include <dev/pci/pcivar.h>
     45 
     46 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
     47 #include <dev/pci/virtio_pcireg.h>
     48 
     49 #define VIRTIO_PRIVATE
     50 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
     51 
     52 
     53 static int	virtio_pci_match(device_t, cfdata_t, void *);
     54 static void	virtio_pci_attach(device_t, device_t, void *);
     55 static int	virtio_pci_rescan(device_t, const char *, const int *);
     56 static int	virtio_pci_detach(device_t, int);
     57 
     58 
     59 #define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
     60 				sizeof(pcireg_t))
     61 struct virtio_pci_softc {
     62 	struct virtio_softc	sc_sc;
     63 
     64 	/* IO space */
     65 	bus_space_tag_t		sc_iot;
     66 	bus_space_handle_t	sc_ioh;
     67 	bus_size_t		sc_iosize;
     68 	bus_size_t		sc_mapped_iosize;
     69 
     70 	/* BARs */
     71 	bus_space_tag_t		sc_bars_iot[NMAPREG];
     72 	bus_space_handle_t	sc_bars_ioh[NMAPREG];
     73 	bus_size_t		sc_bars_iosize[NMAPREG];
     74 
     75 	/* notify space */
     76 	bus_space_tag_t		sc_notify_iot;
     77 	bus_space_handle_t	sc_notify_ioh;
     78 	bus_size_t		sc_notify_iosize;
     79 	uint32_t		sc_notify_off_multiplier;
     80 
     81 	/* isr space */
     82 	bus_space_tag_t		sc_isr_iot;
     83 	bus_space_handle_t	sc_isr_ioh;
     84 	bus_size_t		sc_isr_iosize;
     85 
     86 	/* generic */
     87 	struct pci_attach_args	sc_pa;
     88 	pci_intr_handle_t	*sc_ihp;
     89 	void			**sc_ihs;
     90 	int			sc_ihs_num;
     91 	int			sc_devcfg_offset;	/* for 0.9 */
     92 };
     93 
     94 static int	virtio_pci_attach_09(device_t, void *);
     95 static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
     96 static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
     97 static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
     98 static void	virtio_pci_set_status_09(struct virtio_softc *, int);
     99 static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);
    100 
    101 static int	virtio_pci_attach_10(device_t, void *);
    102 static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
    103 static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
    104 static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
    105 static void	virtio_pci_set_status_10(struct virtio_softc *, int);
    106 static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
    107 static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);
    108 
    109 static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
    110 static void	virtio_pci_free_interrupts(struct virtio_softc *);
    111 static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
    112 static int	virtio_pci_intr(void *arg);
    113 static int	virtio_pci_msix_queue_intr(void *);
    114 static int	virtio_pci_msix_config_intr(void *);
    115 static int	virtio_pci_setup_interrupts_09(struct virtio_softc *);
    116 static int	virtio_pci_setup_interrupts_10(struct virtio_softc *);
    117 static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
    118 		    struct pci_attach_args *);
    119 static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
    120 		    struct pci_attach_args *);
    121 static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);
    122 
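         /*
          * MSI-X vector layout: vector 0 handles config change interrupts
          * and queue vectors start at 1.  When the child driver asked for
          * per-queue vectors (sc_child_mq), queue N uses vector 1 + N;
          * otherwise all queues share vector 1.
          */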
    123 #define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
    124 #define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
    125 
     126 /*
     127  * When using PCI-attached virtio on aarch64-eb under Qemu, the IO space
     128  * unexpectedly reads BIG_ENDIAN where it should stay LITTLE_ENDIAN.  Data
     129  * read one byte at a time seems OK, but reads of larger widths come back
     130  * with swapped endianness.  This is most notable for 8-byte reads, since
     131  * we can't use bus_space_{read,write}_8().
     132  */
    133 
    134 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
    135 #	define READ_ENDIAN_09	BIG_ENDIAN	/* should be LITTLE_ENDIAN */
    136 #	define READ_ENDIAN_10	BIG_ENDIAN
    137 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
    138 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
    139 #elif BYTE_ORDER == BIG_ENDIAN
    140 #	define READ_ENDIAN_09	LITTLE_ENDIAN
    141 #	define READ_ENDIAN_10	BIG_ENDIAN
    142 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
    143 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
    144 #else /* little endian */
    145 #	define READ_ENDIAN_09	LITTLE_ENDIAN
    146 #	define READ_ENDIAN_10	LITTLE_ENDIAN
    147 #	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
    148 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
    149 #endif
    150 
    151 
    152 CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    153     virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    154     virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);
    155 
    156 static const struct virtio_ops virtio_pci_ops_09 = {
    157 	.kick = virtio_pci_kick_09,
    158 	.read_queue_size = virtio_pci_read_queue_size_09,
    159 	.setup_queue = virtio_pci_setup_queue_09,
    160 	.set_status = virtio_pci_set_status_09,
    161 	.neg_features = virtio_pci_negotiate_features_09,
    162 	.alloc_interrupts = virtio_pci_alloc_interrupts,
    163 	.free_interrupts = virtio_pci_free_interrupts,
    164 	.setup_interrupts = virtio_pci_setup_interrupts_09,
    165 };
    166 
    167 static const struct virtio_ops virtio_pci_ops_10 = {
    168 	.kick = virtio_pci_kick_10,
    169 	.read_queue_size = virtio_pci_read_queue_size_10,
    170 	.setup_queue = virtio_pci_setup_queue_10,
    171 	.set_status = virtio_pci_set_status_10,
    172 	.neg_features = virtio_pci_negotiate_features_10,
    173 	.alloc_interrupts = virtio_pci_alloc_interrupts,
    174 	.free_interrupts = virtio_pci_free_interrupts,
    175 	.setup_interrupts = virtio_pci_setup_interrupts_10,
    176 };
    177 
    178 static int
    179 virtio_pci_match(device_t parent, cfdata_t match, void *aux)
    180 {
    181 	struct pci_attach_args *pa;
    182 
    183 	pa = (struct pci_attach_args *)aux;
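         	/*
         	 * Legacy/transitional devices use product IDs 0x1000-0x103f
         	 * with PCI revision 0; virtio 1.0 ("modern") devices use
         	 * 0x1040-0x107f with revision 1.
         	 */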
    184 	switch (PCI_VENDOR(pa->pa_id)) {
    185 	case PCI_VENDOR_QUMRANET:
    186 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
    187 		      PCI_PRODUCT(pa->pa_id)) &&
    188 		     (PCI_PRODUCT(pa->pa_id) <=
    189 		      PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
    190 	              PCI_REVISION(pa->pa_class) == 0)
    191 			return 1;
    192 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
    193 		      PCI_PRODUCT(pa->pa_id)) &&
    194 		     (PCI_PRODUCT(pa->pa_id) <=
    195 		      PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
    196 		      PCI_REVISION(pa->pa_class) == 1)
    197 			return 1;
    198 		break;
    199 	}
    200 
    201 	return 0;
    202 }
    203 
    204 static void
    205 virtio_pci_attach(device_t parent, device_t self, void *aux)
    206 {
    207 	struct virtio_pci_softc * const psc = device_private(self);
    208 	struct virtio_softc * const sc = &psc->sc_sc;
    209 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
    210 	pci_chipset_tag_t pc = pa->pa_pc;
    211 	pcitag_t tag = pa->pa_tag;
    212 	int revision;
    213 	int ret;
    214 	pcireg_t id;
    215 	pcireg_t csr;
    216 
    217 	revision = PCI_REVISION(pa->pa_class);
    218 	switch (revision) {
    219 	case 0:
    220 		/* subsystem ID shows what I am */
    221 		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
    222 		break;
    223 	case 1:
    224 		/* pci product number shows what I am */
    225 		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
    226 		break;
    227 	default:
    228 		aprint_normal(": unknown revision 0x%02x; giving up\n",
    229 			      revision);
    230 		return;
    231 	}
    232 
    233 	aprint_normal("\n");
    234 	aprint_naive("\n");
    235 	virtio_print_device_type(self, id, revision);
    236 
    237 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
    238 	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
    239 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
    240 
    241 	sc->sc_dev = self;
    242 	psc->sc_pa = *pa;
    243 	psc->sc_iot = pa->pa_iot;
    244 
    245 	sc->sc_dmat = pa->pa_dmat;
    246 	if (pci_dma64_available(pa))
    247 		sc->sc_dmat = pa->pa_dmat64;
    248 
    249 	/* attach is dependent on revision */
    250 	ret = 0;
    251 	if (revision == 1) {
    252 		/* try to attach 1.0 */
    253 		ret = virtio_pci_attach_10(self, aux);
    254 	}
    255 	if (ret == 0 && revision == 0) {
    256 		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
    257 		ret = virtio_pci_attach_09(self, aux);
    258 	}
    259 	if (ret) {
    260 		aprint_error_dev(self, "cannot attach (%d)\n", ret);
    261 		return;
    262 	}
    263 	KASSERT(sc->sc_ops);
    264 
    265 	/* preset config region */
    266 	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
    267 	if (virtio_pci_adjust_config_region(psc))
    268 		return;
    269 
    270 	/* generic */
    271 	virtio_device_reset(sc);
    272 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
    273 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
    274 
    275 	sc->sc_childdevid = id;
    276 	sc->sc_child = NULL;
    277 	virtio_pci_rescan(self, NULL, NULL);
    278 	return;
    279 }
    280 
    281 /* ARGSUSED */
    282 static int
    283 virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
    284 {
    285 	struct virtio_pci_softc * const psc = device_private(self);
    286 	struct virtio_softc * const sc = &psc->sc_sc;
    287 	struct virtio_attach_args va;
    288 
    289 	if (sc->sc_child)	/* Child already attached? */
    290 		return 0;
    291 
    292 	memset(&va, 0, sizeof(va));
    293 	va.sc_childdevid = sc->sc_childdevid;
    294 
    295 	config_found(self, &va, NULL, CFARGS_NONE);
    296 
    297 	if (virtio_attach_failed(sc))
    298 		return 0;
    299 
    300 	return 0;
    301 }
    302 
    303 
    304 static int
    305 virtio_pci_detach(device_t self, int flags)
    306 {
    307 	struct virtio_pci_softc * const psc = device_private(self);
    308 	struct virtio_softc * const sc = &psc->sc_sc;
    309 	int r;
    310 
    311 	if (sc->sc_child != NULL) {
    312 		r = config_detach(sc->sc_child, flags);
    313 		if (r)
    314 			return r;
    315 	}
    316 
    317 	/* Check that child detached properly */
    318 	KASSERT(sc->sc_child == NULL);
    319 	KASSERT(sc->sc_vqs == NULL);
    320 	KASSERT(psc->sc_ihs_num == 0);
    321 
    322 	if (psc->sc_iosize)
    323 		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
    324 			psc->sc_mapped_iosize);
    325 	psc->sc_iosize = 0;
    326 
    327 	return 0;
    328 }
    329 
    330 
    331 static int
    332 virtio_pci_attach_09(device_t self, void *aux)
    333 	//struct virtio_pci_softc *psc, struct pci_attach_args *pa)
    334 {
    335 	struct virtio_pci_softc * const psc = device_private(self);
    336 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
    337 	struct virtio_softc * const sc = &psc->sc_sc;
    338 //	pci_chipset_tag_t pc = pa->pa_pc;
    339 //	pcitag_t tag = pa->pa_tag;
    340 
    341 	/* complete IO region */
    342 	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
    343 			   &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
    344 		aprint_error_dev(self, "can't map i/o space\n");
    345 		return EIO;
    346 	}
    347 	psc->sc_mapped_iosize = psc->sc_iosize;
    348 
    349 	/* queue space */
    350 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
    351 			VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
    352 		aprint_error_dev(self, "can't map notify i/o space\n");
    353 		return EIO;
    354 	}
    355 	psc->sc_notify_iosize = 2;
    356 	psc->sc_notify_iot = psc->sc_iot;
    357 
    358 	/* ISR space */
    359 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
    360 			VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
    361 		aprint_error_dev(self, "can't map isr i/o space\n");
    362 		return EIO;
    363 	}
    364 	psc->sc_isr_iosize = 1;
    365 	psc->sc_isr_iot = psc->sc_iot;
    366 
    367 	/* set our version 0.9 ops */
    368 	sc->sc_ops = &virtio_pci_ops_09;
    369 	sc->sc_bus_endian    = READ_ENDIAN_09;
    370 	sc->sc_struct_endian = STRUCT_ENDIAN_09;
    371 	return 0;
    372 }
    373 
    374 
    375 static int
    376 virtio_pci_attach_10(device_t self, void *aux)
    377 {
    378 	struct virtio_pci_softc * const psc = device_private(self);
    379 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
    380 	struct virtio_softc * const sc = &psc->sc_sc;
    381 	pci_chipset_tag_t pc = pa->pa_pc;
    382 	pcitag_t tag = pa->pa_tag;
    383 
    384 	struct virtio_pci_cap common, isr, device;
    385 	struct virtio_pci_notify_cap notify;
    386 	int have_device_cfg = 0;
    387 	bus_size_t bars[NMAPREG] = { 0 };
    388 	int bars_idx[NMAPREG] = { 0 };
    389 	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
    390 	int i, j, ret = 0;
    391 
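         	/*
         	 * Modern (1.0) devices describe where the common, notify, ISR
         	 * and (optional) device-specific config structures live through
         	 * vendor-specific PCI capabilities; locate them before mapping
         	 * any BARs.
         	 */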
    392 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
    393 			&common, sizeof(common)))
    394 		return ENODEV;
    395 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
    396 			&notify, sizeof(notify)))
    397 		return ENODEV;
    398 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
    399 			&isr, sizeof(isr)))
    400 		return ENODEV;
    401 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
    402 			&device, sizeof(device)))
    403 		memset(&device, 0, sizeof(device));
    404 	else
    405 		have_device_cfg = 1;
    406 
    407 	/* Figure out which bars we need to map */
    408 	for (i = 0; i < __arraycount(caps); i++) {
    409 		int bar = caps[i]->bar;
    410 		bus_size_t len = caps[i]->offset + caps[i]->length;
    411 		if (caps[i]->length == 0)
    412 			continue;
    413 		if (bars[bar] < len)
    414 			bars[bar] = len;
    415 	}
    416 
    417 	for (i = j = 0; i < __arraycount(bars); i++) {
    418 		int reg;
    419 		pcireg_t type;
    420 		if (bars[i] == 0)
    421 			continue;
    422 		reg = PCI_MAPREG_START + i * 4;
    423 		type = pci_mapreg_type(pc, tag, reg);
    424 		if (pci_mapreg_map(pa, reg, type, 0,
    425 				&psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
    426 				NULL, &psc->sc_bars_iosize[j])) {
     427 			aprint_error_dev(self, "can't map bar %u\n", i);
    428 			ret = EIO;
    429 			goto err;
    430 		}
    431 		aprint_debug_dev(self,
    432 		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
    433 		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
    434 		bars_idx[i] = j;
    435 		j++;
    436 	}
    437 
    438 	i = bars_idx[notify.cap.bar];
    439 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    440 			notify.cap.offset, notify.cap.length,
    441 			&psc->sc_notify_ioh)) {
    442 		aprint_error_dev(self, "can't map notify i/o space\n");
    443 		ret = EIO;
    444 		goto err;
    445 	}
    446 	psc->sc_notify_iosize = notify.cap.length;
    447 	psc->sc_notify_iot = psc->sc_bars_iot[i];
    448 	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);
    449 
    450 	if (have_device_cfg) {
    451 		i = bars_idx[device.bar];
    452 		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    453 				device.offset, device.length,
    454 				&sc->sc_devcfg_ioh)) {
    455 			aprint_error_dev(self, "can't map devcfg i/o space\n");
    456 			ret = EIO;
    457 			goto err;
    458 		}
    459 		aprint_debug_dev(self,
    460 			"device.offset = 0x%x, device.length = 0x%x\n",
    461 			device.offset, device.length);
    462 		sc->sc_devcfg_iosize = device.length;
    463 		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
    464 	}
    465 
    466 	i = bars_idx[isr.bar];
    467 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    468 			isr.offset, isr.length, &psc->sc_isr_ioh)) {
    469 		aprint_error_dev(self, "can't map isr i/o space\n");
    470 		ret = EIO;
    471 		goto err;
    472 	}
    473 	psc->sc_isr_iosize = isr.length;
    474 	psc->sc_isr_iot = psc->sc_bars_iot[i];
    475 
    476 	i = bars_idx[common.bar];
    477 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    478 			common.offset, common.length, &psc->sc_ioh)) {
    479 		aprint_error_dev(self, "can't map common i/o space\n");
    480 		ret = EIO;
    481 		goto err;
    482 	}
    483 	psc->sc_iosize = common.length;
    484 	psc->sc_iot = psc->sc_bars_iot[i];
    485 	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];
    486 
    487 	psc->sc_sc.sc_version_1 = 1;
    488 
    489 	/* set our version 1.0 ops */
    490 	sc->sc_ops = &virtio_pci_ops_10;
    491 	sc->sc_bus_endian    = READ_ENDIAN_10;
    492 	sc->sc_struct_endian = STRUCT_ENDIAN_10;
    493 	return 0;
    494 
    495 err:
    496 	/* undo our pci_mapreg_map()s */
    497 	for (i = 0; i < __arraycount(bars); i++) {
    498 		if (psc->sc_bars_iosize[i] == 0)
    499 			continue;
    500 		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    501 				psc->sc_bars_iosize[i]);
    502 	}
    503 	return ret;
    504 }
    505 
    506 /* v1.0 attach helper */
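         /*
          * Scan the vendor-specific PCI capability list for a virtio_pci_cap
          * whose cfg_type matches, and copy it (including any trailing
          * capability-specific fields, e.g. the notify_off_multiplier) into
          * buf.
          */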
    507 static int
    508 virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
    509 {
    510 	device_t self = psc->sc_sc.sc_dev;
    511 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
    512 	pcitag_t tag = psc->sc_pa.pa_tag;
    513 	unsigned int offset, i, len;
    514 	union {
    515 		pcireg_t reg[8];
    516 		struct virtio_pci_cap vcap;
    517 	} *v = buf;
    518 
    519 	if (buflen < sizeof(struct virtio_pci_cap))
    520 		return ERANGE;
    521 
    522 	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
    523 		return ENOENT;
    524 
    525 	do {
    526 		for (i = 0; i < 4; i++)
    527 			v->reg[i] =
    528 				le32toh(pci_conf_read(pc, tag, offset + i * 4));
    529 		if (v->vcap.cfg_type == cfg_type)
    530 			break;
    531 		offset = v->vcap.cap_next;
    532 	} while (offset != 0);
    533 
    534 	if (offset == 0)
    535 		return ENOENT;
    536 
    537 	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
    538 		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
    539 		if (len > buflen) {
    540 			aprint_error_dev(self, "%s cap too large\n", __func__);
    541 			return ERANGE;
    542 		}
    543 		for (i = 4; i < len / sizeof(pcireg_t);  i++)
    544 			v->reg[i] =
    545 				le32toh(pci_conf_read(pc, tag, offset + i * 4));
    546 	}
    547 
    548 	/* endian fixup */
    549 	v->vcap.offset = le32toh(v->vcap.offset);
    550 	v->vcap.length = le32toh(v->vcap.length);
    551 	return 0;
    552 }
    553 
    554 
    555 /* -------------------------------------
    556  * Version 0.9 support
    557  * -------------------------------------*/
    558 
    559 static void
    560 virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
    561 {
    562 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    563 
    564 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
    565 }
    566 
     567 /* only meaningful for v0.9; also called for v1.0, where it returns early */
    568 static int
    569 virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
    570 {
    571 	struct virtio_softc * const sc = (struct virtio_softc *) psc;
    572 	device_t self = psc->sc_sc.sc_dev;
    573 
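         	/*
         	 * For legacy (0.9) devices the device-specific config follows
         	 * the common header, whose size depends on whether MSI-X is in
         	 * use, so the subregion is recomputed whenever the interrupt
         	 * setup changes.
         	 */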
    574 	if (psc->sc_sc.sc_version_1)
    575 		return 0;
    576 
    577 	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
    578 	sc->sc_devcfg_iot = psc->sc_iot;
    579 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
    580 			psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
    581 			&sc->sc_devcfg_ioh)) {
    582 		aprint_error_dev(self, "can't map config i/o space\n");
    583 		return EIO;
    584 	}
    585 
    586 	return 0;
    587 }
    588 
    589 static uint16_t
    590 virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
    591 {
    592 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    593 
    594 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    595 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
    596 	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
    597 	    VIRTIO_CONFIG_QUEUE_SIZE);
    598 }
    599 
    600 static void
    601 virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
    602 {
    603 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    604 
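         	/*
         	 * Legacy devices take the ring's DMA address in units of
         	 * VIRTIO_PAGE_SIZE; writing an address of 0 releases the queue.
         	 */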
    605 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    606 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
    607 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
    608 	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
    609 
    610 	if (psc->sc_ihs_num > 1) {
    611 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    612 		if (sc->sc_child_mq)
    613 			vec += idx;
    614 		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    615 		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
    616 	}
    617 }
    618 
    619 static void
    620 virtio_pci_set_status_09(struct virtio_softc *sc, int status)
    621 {
    622 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    623 	int old = 0;
    624 
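         	/*
         	 * Status bits are cumulative: writing 0 resets the device,
         	 * otherwise OR the new bit into the bits already set.
         	 */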
    625 	if (status != 0) {
    626 	    old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
    627 		VIRTIO_CONFIG_DEVICE_STATUS);
    628 	}
    629 	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
    630 	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
    631 }
    632 
    633 static void
    634 virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
    635 {
    636 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    637 	uint32_t r;
    638 
    639 	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
    640 	    VIRTIO_CONFIG_DEVICE_FEATURES);
    641 
    642 	r &= guest_features;
    643 
    644 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
    645 	    VIRTIO_CONFIG_GUEST_FEATURES, r);
    646 
    647 	sc->sc_active_features = r;
    648 }
    649 
    650 /* -------------------------------------
    651  * Version 1.0 support
    652  * -------------------------------------*/
    653 
    654 static void
    655 virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
    656 {
    657 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    658 	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
    659 		psc->sc_notify_off_multiplier;
    660 
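         	/*
         	 * Each queue's notify register lives at
         	 * queue_notify_off * notify_off_multiplier within the notify
         	 * region; writing the queue index there kicks the device.
         	 */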
    661 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
    662 }
    663 
    664 
    665 static uint16_t
    666 virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
    667 {
    668 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    669 	bus_space_tag_t	   iot = psc->sc_iot;
    670 	bus_space_handle_t ioh = psc->sc_ioh;
    671 
    672 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
    673 	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
    674 }
    675 
     676 /*
     677  * Virtio 1.0 registers are little endian by definition, and an 8-byte
     678  * value may be written as two 4-byte writes.
     679  *
     680  * This is not a general-purpose function that can be used in any
     681  * driver. Virtio specifically allows the 8-byte bus transaction
     682  * to be split into two 4-byte transactions. Do not copy/use it
     683  * in other device drivers unless you know that the device accepts it.
     684  */
    685 static __inline void
    686 virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    687      bus_size_t offset, uint64_t value)
    688 {
    689 #if defined(__HAVE_BUS_SPACE_8)
    690 	bus_space_write_8(iot, ioh, offset, value);
    691 #elif _QUAD_HIGHWORD
    692 	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
    693 	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
    694 #else
    695 	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
    696 	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
    697 #endif
    698 }
    699 
    700 static void
    701 virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
    702 {
    703 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    704 	struct virtqueue *vq = &sc->sc_vqs[idx];
    705 	bus_space_tag_t	   iot = psc->sc_iot;
    706 	bus_space_handle_t ioh = psc->sc_ioh;
    707 	KASSERT(vq->vq_index == idx);
    708 
    709 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
    710 	if (addr == 0) {
    711 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
    712 		virtio_pci_bus_space_write_8(iot, ioh,
    713 		    VIRTIO_CONFIG1_QUEUE_DESC,   0);
    714 		virtio_pci_bus_space_write_8(iot, ioh,
    715 		    VIRTIO_CONFIG1_QUEUE_AVAIL,  0);
    716 		virtio_pci_bus_space_write_8(iot, ioh,
    717 		    VIRTIO_CONFIG1_QUEUE_USED,   0);
    718 	} else {
    719 		virtio_pci_bus_space_write_8(iot, ioh,
    720 			VIRTIO_CONFIG1_QUEUE_DESC, addr);
    721 		virtio_pci_bus_space_write_8(iot, ioh,
    722 			VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
    723 		virtio_pci_bus_space_write_8(iot, ioh,
    724 			VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
    725 		bus_space_write_2(iot, ioh,
    726 			VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
    727 		vq->vq_notify_off = bus_space_read_2(iot, ioh,
    728 			VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
    729 	}
    730 
    731 	if (psc->sc_ihs_num > 1) {
    732 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    733 		if (sc->sc_child_mq)
    734 			vec += idx;
    735 		bus_space_write_2(iot, ioh,
    736 			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
    737 	}
    738 }
    739 
    740 static void
    741 virtio_pci_set_status_10(struct virtio_softc *sc, int status)
    742 {
    743 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    744 	bus_space_tag_t	   iot = psc->sc_iot;
    745 	bus_space_handle_t ioh = psc->sc_ioh;
    746 	int old = 0;
    747 
    748 	if (status)
    749 		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
    750 	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
    751 }
    752 
     753 static void
     754 virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
    755 {
    756 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    757 	device_t self          =  sc->sc_dev;
    758 	bus_space_tag_t	   iot = psc->sc_iot;
    759 	bus_space_handle_t ioh = psc->sc_ioh;
    760 	uint64_t host, negotiated, device_status;
    761 
    762 	guest_features |= VIRTIO_F_VERSION_1;
    763 	/* notify on empty is 0.9 only */
    764 	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
    765 	sc->sc_active_features = 0;
    766 
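         	/*
         	 * Feature bits are accessed through a 32-bit window: select 0
         	 * exposes bits 0-31 and select 1 exposes bits 32-63.
         	 */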
    767 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
    768 	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
    769 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
    770 	host |= (uint64_t)
    771 		bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
    772 
    773 	negotiated = host & guest_features;
    774 
    775 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
    776 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
    777 			negotiated & 0xffffffff);
    778 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
    779 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
    780 			negotiated >> 32);
    781 	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
    782 
    783 	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
    784 	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
    785 		aprint_error_dev(self, "feature negotiation failed\n");
    786 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
    787 				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
    788 		return;
    789 	}
    790 
    791 	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
    792 		aprint_error_dev(self, "host rejected version 1\n");
    793 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
    794 				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
    795 		return;
    796 	}
    797 
    798 	sc->sc_active_features = negotiated;
    799 	return;
    800 }
    801 
    802 
    803 /* -------------------------------------
    804  * Generic PCI interrupt code
    805  * -------------------------------------*/
    806 
    807 static int
    808 virtio_pci_setup_interrupts_10(struct virtio_softc *sc)
    809 {
    810 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    811 	device_t self          =  sc->sc_dev;
    812 	bus_space_tag_t	   iot = psc->sc_iot;
    813 	bus_space_handle_t ioh = psc->sc_ioh;
    814 	int vector, ret, qid;
    815 
    816 	if (!virtio_pci_msix_enabled(psc))
    817 		return 0;
    818 
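         	/*
         	 * The device echoes the programmed vector on readback; a
         	 * mismatch (the spec-defined "no vector" value is 0xffff)
         	 * means the device could not allocate that vector.
         	 */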
    819 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
    820 	bus_space_write_2(iot, ioh,
    821 		VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
    822 	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
    823 	if (ret != vector) {
    824 		aprint_error_dev(self, "can't set config msix vector\n");
    825 		return -1;
    826 	}
    827 
    828 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
    829 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    830 
    831 		if (sc->sc_child_mq)
    832 			vector += qid;
    833 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
    834 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
    835 			vector);
    836 		ret = bus_space_read_2(iot, ioh,
    837 			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
    838 		if (ret != vector) {
    839 			aprint_error_dev(self, "can't set queue %d "
    840 				"msix vector\n", qid);
    841 			return -1;
    842 		}
    843 	}
    844 
    845 	return 0;
    846 }
    847 
    848 static int
    849 virtio_pci_setup_interrupts_09(struct virtio_softc *sc)
    850 {
    851 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    852 	device_t self = sc->sc_dev;
    853 	int offset, vector, ret, qid;
    854 
    855 	if (!virtio_pci_msix_enabled(psc))
    856 		return 0;
    857 
    858 	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
    859 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
    860 
    861 	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
    862 	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
    863 	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
    864 	    vector, ret);
    865 	if (ret != vector) {
    866 		aprint_error_dev(self, "can't set config msix vector\n");
    867 		return -1;
    868 	}
    869 
    870 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
    871 		offset = VIRTIO_CONFIG_QUEUE_SELECT;
    872 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);
    873 
    874 		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
    875 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    876 
    877 		if (sc->sc_child_mq)
    878 			vector += qid;
    879 
    880 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
    881 		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
    882 		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
    883 		    vector, ret);
    884 		if (ret != vector) {
    885 			aprint_error_dev(self, "can't set queue %d "
    886 				"msix vector\n", qid);
    887 			return -1;
    888 		}
    889 	}
    890 
    891 	return 0;
    892 }
    893 
    894 static int
    895 virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    896     struct pci_attach_args *pa)
    897 {
    898 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    899 	device_t self = sc->sc_dev;
    900 	pci_chipset_tag_t pc = pa->pa_pc;
    901 	struct virtqueue *vq;
    902 	char intrbuf[PCI_INTRSTR_LEN];
    903 	char intr_xname[INTRDEVNAMEBUF];
    904 	char const *intrstr;
    905 	int idx, qid, n;
    906 
    907 	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
    908 	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
    909 		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);
    910 
    911 	snprintf(intr_xname, sizeof(intr_xname), "%s config",
    912 	    device_xname(sc->sc_dev));
    913 
    914 	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
    915 	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
    916 	if (psc->sc_ihs[idx] == NULL) {
    917 		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
    918 		goto error;
    919 	}
    920 
    921 	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    922 	if (sc->sc_child_mq) {
    923 		for (qid = 0; qid < sc->sc_nvqs; qid++) {
    924 			n = idx + qid;
    925 			vq = &sc->sc_vqs[qid];
    926 
    927 			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
    928 			    device_xname(sc->sc_dev), qid);
    929 
    930 			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
    931 				pci_intr_setattr(pc, &psc->sc_ihp[n],
    932 				    PCI_INTR_MPSAFE, true);
    933 			}
    934 
    935 			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
    936 			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
    937 			if (psc->sc_ihs[n] == NULL) {
    938 				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
    939 				goto error;
    940 			}
    941 		}
    942 	} else {
    943 		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
    944 			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);
    945 
    946 		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
    947 		    device_xname(sc->sc_dev));
    948 		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
    949 		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
    950 		if (psc->sc_ihs[idx] == NULL) {
    951 			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
    952 			goto error;
    953 		}
    954 	}
    955 
    956 	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
    957 	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
    958 	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
    959 	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    960 	if (sc->sc_child_mq) {
    961 		kcpuset_t *affinity;
    962 		int affinity_to, r;
    963 
    964 		kcpuset_create(&affinity, false);
    965 
    966 		for (qid = 0; qid < sc->sc_nvqs; qid++) {
    967 			n = idx + qid;
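         			/*
         			 * Spread the vq vectors over the CPUs, taking the
         			 * vqs in pairs (qid / 2) so that, e.g., an RX/TX
         			 * pair presumably ends up on the same CPU.
         			 */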
    968 			affinity_to = (qid / 2) % ncpu;
    969 
    970 			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
    971 			    intrbuf, sizeof(intrbuf));
    972 
    973 			kcpuset_zero(affinity);
    974 			kcpuset_set(affinity, affinity_to);
    975 			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
    976 			if (r == 0) {
    977 				aprint_normal_dev(self,
    978 				    "for vq #%d interrupting at %s affinity to %u\n",
    979 				    qid, intrstr, affinity_to);
    980 			} else {
    981 				aprint_normal_dev(self,
    982 				    "for vq #%d interrupting at %s\n",
    983 				    qid, intrstr);
    984 			}
    985 		}
    986 
    987 		kcpuset_destroy(affinity);
    988 	} else {
    989 		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
    990 		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
    991 	}
    992 
    993 	return 0;
    994 
    995 error:
    996 	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
    997 	if (psc->sc_ihs[idx] != NULL)
    998 		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
    999 	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1000 	if (sc->sc_child_mq) {
   1001 		for (qid = 0; qid < sc->sc_nvqs; qid++) {
   1002 			n = idx + qid;
   1003 			if (psc->sc_ihs[n] == NULL)
   1004 				continue;
   1005 			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
   1006 		}
   1007 
   1008 	} else {
   1009 		if (psc->sc_ihs[idx] != NULL)
   1010 			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
   1011 	}
   1012 
   1013 	return -1;
   1014 }
   1015 
   1016 static int
   1017 virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
   1018     struct pci_attach_args *pa)
   1019 {
   1020 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1021 	device_t self = sc->sc_dev;
   1022 	pci_chipset_tag_t pc = pa->pa_pc;
   1023 	char intrbuf[PCI_INTRSTR_LEN];
   1024 	char const *intrstr;
   1025 
   1026 	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
   1027 		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);
   1028 
   1029 	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
   1030 	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
   1031 	if (psc->sc_ihs[0] == NULL) {
   1032 		aprint_error_dev(self, "couldn't establish INTx\n");
   1033 		return -1;
   1034 	}
   1035 
   1036 	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
   1037 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   1038 
   1039 	return 0;
   1040 }
   1041 
   1042 static int
   1043 virtio_pci_alloc_interrupts(struct virtio_softc *sc)
   1044 {
   1045 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1046 	device_t self = sc->sc_dev;
   1047 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
   1048 	pcitag_t tag = psc->sc_pa.pa_tag;
   1049 	int error;
   1050 	int nmsix;
   1051 	int off;
   1052 	int counts[PCI_INTR_TYPE_SIZE];
   1053 	pci_intr_type_t max_type;
   1054 	pcireg_t ctl;
   1055 
   1056 	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
   1057 	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);
   1058 
   1059 	/* We need at least two: one for config and the other for queues */
   1060 	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
   1061 		/* Try INTx only */
   1062 		max_type = PCI_INTR_TYPE_INTX;
   1063 		counts[PCI_INTR_TYPE_INTX] = 1;
   1064 	} else {
   1065 		/* Try MSI-X first and INTx second */
   1066 		if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
   1067 			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1068 		} else {
   1069 			sc->sc_child_mq = false;
   1070 		}
   1071 
   1072 		if (sc->sc_child_mq == false) {
   1073 			nmsix = 2;
   1074 		}
   1075 
   1076 		max_type = PCI_INTR_TYPE_MSIX;
   1077 		counts[PCI_INTR_TYPE_MSIX] = nmsix;
   1078 		counts[PCI_INTR_TYPE_MSI] = 0;
   1079 		counts[PCI_INTR_TYPE_INTX] = 1;
   1080 	}
   1081 
   1082 retry:
   1083 	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
   1084 	if (error != 0) {
   1085 		aprint_error_dev(self, "couldn't map interrupt\n");
   1086 		return -1;
   1087 	}
   1088 
   1089 	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
   1090 		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
   1091 		    KM_SLEEP);
   1092 
   1093 		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
   1094 		if (error != 0) {
   1095 			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
   1096 			pci_intr_release(pc, psc->sc_ihp, nmsix);
   1097 
   1098 			/* Retry INTx */
   1099 			max_type = PCI_INTR_TYPE_INTX;
   1100 			counts[PCI_INTR_TYPE_INTX] = 1;
   1101 			goto retry;
   1102 		}
   1103 
   1104 		psc->sc_ihs_num = nmsix;
   1105 		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
   1106 		virtio_pci_adjust_config_region(psc);
   1107 	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
   1108 		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
   1109 		    KM_SLEEP);
   1110 
   1111 		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
   1112 		if (error != 0) {
   1113 			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
   1114 			pci_intr_release(pc, psc->sc_ihp, 1);
   1115 			return -1;
   1116 		}
   1117 
   1118 		psc->sc_ihs_num = 1;
   1119 		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
   1120 		virtio_pci_adjust_config_region(psc);
   1121 
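         		/*
         		 * pci_get_capability() returns nonzero when the capability
         		 * exists; if the device also has MSI-X, make sure it stays
         		 * disabled while we use INTx.
         		 */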
   1122 		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
   1123 		if (error != 0) {
   1124 			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
   1125 			ctl &= ~PCI_MSIX_CTL_ENABLE;
   1126 			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
   1127 		}
   1128 	}
   1129 
   1130 	return 0;
   1131 }
   1132 
   1133 static void
   1134 virtio_pci_free_interrupts(struct virtio_softc *sc)
   1135 {
   1136 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1137 
   1138 	for (int i = 0; i < psc->sc_ihs_num; i++) {
   1139 		if (psc->sc_ihs[i] == NULL)
   1140 			continue;
   1141 		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
   1142 		psc->sc_ihs[i] = NULL;
   1143 	}
   1144 
   1145 	if (psc->sc_ihs_num > 0)
   1146 		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);
   1147 
   1148 	if (psc->sc_ihs != NULL) {
   1149 		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
   1150 		psc->sc_ihs = NULL;
   1151 	}
   1152 	psc->sc_ihs_num = 0;
   1153 }
   1154 
   1155 static bool
   1156 virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
   1157 {
   1158 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
   1159 
   1160 	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
   1161 		return true;
   1162 
   1163 	return false;
   1164 }
   1165 
   1166 /*
   1167  * Interrupt handler.
   1168  */
   1169 static int
   1170 virtio_pci_intr(void *arg)
   1171 {
   1172 	struct virtio_softc *sc = arg;
   1173 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1174 	int isr, r = 0;
   1175 
   1176 	/* check and ack the interrupt */
   1177 	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
   1178 	if (isr == 0)
   1179 		return 0;
   1180 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
   1181 	    (sc->sc_config_change != NULL))
   1182 		r = (sc->sc_config_change)(sc);
   1183 	if (sc->sc_intrhand != NULL) {
   1184 		if (sc->sc_soft_ih != NULL)
   1185 			softint_schedule(sc->sc_soft_ih);
   1186 		else
   1187 			r |= (sc->sc_intrhand)(sc);
   1188 	}
   1189 
   1190 	return r;
   1191 }
   1192 
   1193 static int
   1194 virtio_pci_msix_queue_intr(void *arg)
   1195 {
   1196 	struct virtio_softc *sc = arg;
   1197 	int r = 0;
   1198 
   1199 	if (sc->sc_intrhand != NULL) {
   1200 		if (sc->sc_soft_ih != NULL)
   1201 			softint_schedule(sc->sc_soft_ih);
   1202 		else
   1203 			r |= (sc->sc_intrhand)(sc);
   1204 	}
   1205 
   1206 	return r;
   1207 }
   1208 
   1209 static int
   1210 virtio_pci_msix_config_intr(void *arg)
   1211 {
   1212 	struct virtio_softc *sc = arg;
   1213 	int r = 0;
   1214 
   1215 	if (sc->sc_config_change != NULL)
   1216 		r = (sc->sc_config_change)(sc);
   1217 	return r;
   1218 }
   1219 
   1220 MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");
   1221 
   1222 #ifdef _MODULE
   1223 #include "ioconf.c"
   1224 #endif
   1225 
   1226 static int
   1227 virtio_pci_modcmd(modcmd_t cmd, void *opaque)
   1228 {
   1229 	int error = 0;
   1230 
   1231 #ifdef _MODULE
   1232 	switch (cmd) {
   1233 	case MODULE_CMD_INIT:
   1234 		error = config_init_component(cfdriver_ioconf_virtio_pci,
   1235 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
   1236 		break;
   1237 	case MODULE_CMD_FINI:
   1238 		error = config_fini_component(cfdriver_ioconf_virtio_pci,
   1239 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
   1240 		break;
   1241 	default:
   1242 		error = ENOTTY;
   1243 		break;
   1244 	}
   1245 #endif
   1246 
   1247 	return error;
   1248 }
   1249