Home | History | Annotate | Line # | Download | only in pci
virtio_pci.c revision 1.38.4.4
      1 /* $NetBSD: virtio_pci.c,v 1.38.4.4 2024/10/02 18:20:48 martin Exp $ */
      2 
      3 /*
      4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
      5  * Copyright (c) 2012 Stefan Fritsch.
      6  * Copyright (c) 2010 Minoura Makoto.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.38.4.4 2024/10/02 18:20:48 martin Exp $");
     32 
     33 #include <sys/param.h>
     34 #include <sys/types.h>
     35 
     36 #include <sys/device.h>
     37 #include <sys/endian.h>
     38 #include <sys/interrupt.h>
     39 #include <sys/kmem.h>
     40 #include <sys/module.h>
     41 #include <sys/syslog.h>
     42 #include <sys/systm.h>
     43 
     44 #include <dev/pci/pcidevs.h>
     45 #include <dev/pci/pcireg.h>
     46 #include <dev/pci/pcivar.h>
     47 
     48 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
     49 #include <dev/pci/virtio_pcireg.h>
     50 
     51 #define VIRTIO_PRIVATE
     52 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
     53 
     54 
/*
 * VIRTIO_PCI_LOG(sc, use_log, fmt, args...):
 * Report an error either through log(9) at LOG_DEBUG (when _use_log
 * is set, e.g. when re-initializing after the device has already
 * attached) or through aprint_error_dev(9) during autoconfiguration.
 */
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while(0)
     66 
     67 static int	virtio_pci_match(device_t, cfdata_t, void *);
     68 static void	virtio_pci_attach(device_t, device_t, void *);
     69 static int	virtio_pci_rescan(device_t, const char *, const int *);
     70 static int	virtio_pci_detach(device_t, int);
     71 
/* Number of PCI base address registers (config space 0x10..0x24). */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))

/*
 * PCI transport state.  The bus-independent virtio state (sc_sc) is
 * embedded as the first member.  The "IO space" members cover the
 * legacy (0.9) I/O region, or the common configuration structure for
 * 1.0 devices; the notify/isr members are subregions carved out by
 * the version-specific attach routines.
 */
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;	/* one MSI-X vector per vq */

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;	/* allocated intr handles */
	void			**sc_ihs;	/* established handlers */
	int			sc_ihs_num;	/* >1 implies MSI-X in use */
	int			sc_devcfg_offset;	/* for 0.9 */
};
    106 
    107 static int	virtio_pci_attach_09(device_t, void *);
    108 static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
    109 static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
    110 static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t,
    111 		    uint64_t);
    112 static void	virtio_pci_set_status_09(struct virtio_softc *, int);
    113 static void	virtio_pci_negotiate_features_09(struct virtio_softc *,
    114 		    uint64_t);
    115 
    116 static int	virtio_pci_attach_10(device_t, void *);
    117 static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
    118 static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
    119 static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t,
    120 		    uint64_t);
    121 static void	virtio_pci_set_status_10(struct virtio_softc *, int);
    122 static void	virtio_pci_negotiate_features_10(struct virtio_softc *,
    123 		    uint64_t);
    124 static int	virtio_pci_find_cap(struct virtio_pci_softc *, int, void *,
    125 		    int);
    126 
    127 static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
    128 static void	virtio_pci_free_interrupts(struct virtio_softc *);
    129 static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *);
    130 static int	virtio_pci_intr(void *);
    131 static int	virtio_pci_msix_queue_intr(void *);
    132 static int	virtio_pci_msix_config_intr(void *);
    133 static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
    134 static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
    135 static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
    136 		    const struct pci_attach_args *);
    137 static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
    138 		    const struct pci_attach_args *);
    139 static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);
    140 
    141 #define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
    142 #define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
    143 
    144 /*
    145  * For big-endian aarch64/armv7 on QEMU (and most real HW), only CPU cores
    146  * are running in big-endian mode, with all peripheral being configured to
    147  * little-endian mode. Their default bus_space(9) functions forcibly swap
    148  * byte-order. This guarantees that PIO'ed data from pci(4), e.g., are
    149  * correctly handled by bus_space(9), while DMA'ed ones should be swapped
    150  * by hand, in violation of virtio(4) specifications.
    151  */
    152 
    153 #if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
    154 #	define READ_ENDIAN_09	BIG_ENDIAN
    155 #	define READ_ENDIAN_10	BIG_ENDIAN
    156 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
    157 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
    158 #elif BYTE_ORDER == BIG_ENDIAN
    159 #	define READ_ENDIAN_09	LITTLE_ENDIAN
    160 #	define READ_ENDIAN_10	BIG_ENDIAN
    161 #	define STRUCT_ENDIAN_09	BIG_ENDIAN
    162 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
    163 #else /* little endian */
    164 #	define READ_ENDIAN_09	LITTLE_ENDIAN
    165 #	define READ_ENDIAN_10	LITTLE_ENDIAN
    166 #	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
    167 #	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
    168 #endif
    169 
    170 CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    171     virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    172     virtio_pci_rescan, NULL, 0);
    173 
/* Transport operations for legacy (virtio 0.9) devices. */
static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};
    184 
/* Transport operations for modern (virtio 1.0) devices. */
static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};
    195 
    196 static int
    197 virtio_pci_match(device_t parent, cfdata_t match, void *aux)
    198 {
    199 	const struct pci_attach_args * const pa = aux;
    200 
    201 	switch (PCI_VENDOR(pa->pa_id)) {
    202 	case PCI_VENDOR_QUMRANET:
    203 		/* Transitional devices MUST have a PCI Revision ID of 0. */
    204 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
    205 			    PCI_PRODUCT(pa->pa_id)) &&
    206 			(PCI_PRODUCT(pa->pa_id) <=
    207 			    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
    208 		    PCI_REVISION(pa->pa_class) == 0)
    209 			return 1;
    210 		/*
    211 		 * Non-transitional devices SHOULD have a PCI Revision
    212 		 * ID of 1 or higher.  Drivers MUST match any PCI
    213 		 * Revision ID value.
    214 		 */
    215 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
    216 			    PCI_PRODUCT(pa->pa_id)) &&
    217 			(PCI_PRODUCT(pa->pa_id) <=
    218 			    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
    219 		    /* XXX: TODO */
    220 		    PCI_REVISION(pa->pa_class) == 1)
    221 			return 1;
    222 		break;
    223 	}
    224 
    225 	return 0;
    226 }
    227 
/*
 * Attach the virtio PCI transport.  The PCI revision ID selects the
 * spec version: 0 is a transitional device (0.9, possibly also 1.0),
 * 1 is non-transitional (1.0 only).  After the version-specific
 * attach has installed sc_ops, the device is reset and acknowledged,
 * and the child device (block, net, ...) is attached via rescan.
 */
static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	const struct pci_attach_args * const pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	/* enable bus mastering and I/O space before touching the device */
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	/* prefer the 64-bit DMA tag when the bus provides one */
	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region (0.9 layout without MSI; see adjust below) */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}
    304 
    305 /* ARGSUSED */
    306 static int
    307 virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
    308 {
    309 	struct virtio_pci_softc * const psc = device_private(self);
    310 	struct virtio_softc * const sc = &psc->sc_sc;
    311 	struct virtio_attach_args va;
    312 
    313 	if (sc->sc_child)	/* Child already attached? */
    314 		return 0;
    315 
    316 	memset(&va, 0, sizeof(va));
    317 	va.sc_childdevid = sc->sc_childdevid;
    318 
    319 	config_found(self, &va, NULL, CFARGS_NONE);
    320 
    321 	if (virtio_attach_failed(sc))
    322 		return 0;
    323 
    324 	return 0;
    325 }
    326 
/*
 * Detach the transport: detach the child first, then unmap whatever
 * the version-specific attach routine mapped.
 */
static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	unsigned i;
	int r;

	/* the child may refuse to detach; propagate its error */
	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (sc->sc_version_1) {
		/* 1.0: unmap every BAR mapped by attach_10 */
		for (i = 0; i < __arraycount(psc->sc_bars_iot); i++) {
			if (psc->sc_bars_iosize[i] == 0)
				continue;
			bus_space_unmap(psc->sc_bars_iot[i],
			    psc->sc_bars_ioh[i], psc->sc_bars_iosize[i]);
			psc->sc_bars_iosize[i] = 0;
		}
	} else {
		/* 0.9: single legacy I/O region */
		if (psc->sc_iosize) {
			bus_space_unmap(psc->sc_iot, psc->sc_ioh,
			    psc->sc_iosize);
			psc->sc_iosize = 0;
		}
	}

	return 0;
}
    362 
/*
 * Legacy (0.9) attach: map the single legacy I/O region, carve out
 * the notify and ISR bytes as subregions, and install the 0.9 ops
 * and endianness configuration.  Returns 0 or EIO.
 */
static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
		&psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}

	/* queue space (2-byte notify register) */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
		VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space (1-byte status register) */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
		VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}
    401 
/*
 * Modern (1.0) attach: locate the virtio vendor capabilities, map the
 * BARs they reference, and carve the notify, device-config, ISR and
 * common-config windows out of those BARs.  Common, notify and ISR
 * capabilities are mandatory; device config is optional.  On failure
 * all mapped BARs are unmapped again.  Returns 0, ENODEV or EIO.
 */
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	const pci_chipset_tag_t pc = pa->pa_pc;
	const pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap * const caps[] =
	    { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
		&common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
		&notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
		&isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
		&device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;

		if (caps[i]->length == 0)
			continue;
		/* remember the largest extent needed in each BAR */
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* Map each needed BAR once; bars_idx[] maps BAR no -> map slot. */
	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;

		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
			&psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
			NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u \n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	/* notify window */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		notify.cap.offset, notify.cap.length, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	/* device-specific configuration, if present */
	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
			psc->sc_bars_ioh[i], device.offset, device.length,
			&sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	/* ISR status */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	/* common configuration structure */
	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
		psc->sc_bars_iosize[i] = 0;
	}
	return ret;
}
    534 
/*
 * v1.0 attach helper: scan the PCI vendor-specific capability list
 * for a virtio capability of type cfg_type and copy it into buf
 * (buflen bytes).  The fixed 16-byte virtio_pci_cap header is always
 * read; capabilities whose cap_len says they are larger (e.g. the
 * notify capability) are read in full.  The offset/length fields are
 * converted to host byte order before returning.
 *
 * Returns 0 on success, ENOENT if no matching capability exists,
 * ERANGE if buf is too small for the capability.
 */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf,
    int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset,
		&v->reg[0]))
		return ENOENT;

	/* walk the vendor capability chain looking for cfg_type */
	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	/* larger capability: read the remaining registers as well */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t);  i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}
    584 
    585 /* -------------------------------------
    586  * Version 0.9 support
    587  * -------------------------------------*/
    588 
    589 static void
    590 virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
    591 {
    592 	struct virtio_pci_softc * const psc = container_of(sc,
    593 	    struct virtio_pci_softc, sc_sc);
    594 
    595 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
    596 }
    597 
    598 /* only applicable for v 0.9 but also called for 1.0 */
    599 static int
    600 virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
    601 {
    602 	struct virtio_softc * const sc = &psc->sc_sc;
    603 	device_t self = sc->sc_dev;
    604 
    605 	if (psc->sc_sc.sc_version_1)
    606 		return 0;
    607 
    608 	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
    609 	sc->sc_devcfg_iot = psc->sc_iot;
    610 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
    611 		psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
    612 		&sc->sc_devcfg_ioh)) {
    613 		aprint_error_dev(self, "can't map config i/o space\n");
    614 		return EIO;
    615 	}
    616 
    617 	return 0;
    618 }
    619 
    620 static uint16_t
    621 virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
    622 {
    623 	struct virtio_pci_softc * const psc = container_of(sc,
    624 	    struct virtio_pci_softc, sc_sc);
    625 
    626 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    627 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
    628 	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
    629 	    VIRTIO_CONFIG_QUEUE_SIZE);
    630 }
    631 
    632 static void
    633 virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
    634 {
    635 	struct virtio_pci_softc * const psc = container_of(sc,
    636 	    struct virtio_pci_softc, sc_sc);
    637 
    638 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    639 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
    640 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
    641 	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
    642 
    643 	if (psc->sc_ihs_num > 1) {
    644 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    645 		if (psc->sc_intr_pervq)
    646 			vec += idx;
    647 		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    648 		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
    649 	}
    650 }
    651 
    652 static void
    653 virtio_pci_set_status_09(struct virtio_softc *sc, int status)
    654 {
    655 	struct virtio_pci_softc * const psc = container_of(sc,
    656 	    struct virtio_pci_softc, sc_sc);
    657 	int old = 0;
    658 
    659 	if (status != 0) {
    660 		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
    661 		    VIRTIO_CONFIG_DEVICE_STATUS);
    662 	}
    663 	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
    664 	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
    665 }
    666 
    667 static void
    668 virtio_pci_negotiate_features_09(struct virtio_softc *sc,
    669     uint64_t guest_features)
    670 {
    671 	struct virtio_pci_softc * const psc = container_of(sc,
    672 	    struct virtio_pci_softc, sc_sc);
    673 	uint32_t r;
    674 
    675 	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
    676 	    VIRTIO_CONFIG_DEVICE_FEATURES);
    677 
    678 	r &= guest_features;
    679 
    680 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
    681 	    VIRTIO_CONFIG_GUEST_FEATURES, r);
    682 
    683 	sc->sc_active_features = r;
    684 }
    685 
    686 /* -------------------------------------
    687  * Version 1.0 support
    688  * -------------------------------------*/
    689 
    690 static void
    691 virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
    692 {
    693 	struct virtio_pci_softc * const psc = container_of(sc,
    694 	    struct virtio_pci_softc, sc_sc);
    695 	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
    696 	    psc->sc_notify_off_multiplier;
    697 
    698 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
    699 }
    700 
    701 static uint16_t
    702 virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
    703 {
    704 	struct virtio_pci_softc * const psc = container_of(sc,
    705 	    struct virtio_pci_softc, sc_sc);
    706 	bus_space_tag_t iot = psc->sc_iot;
    707 	bus_space_handle_t ioh = psc->sc_ioh;
    708 
    709 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
    710 	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
    711 }
    712 
/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	/* host keeps the low 32-bit word at the lower address */
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	/* host keeps the high 32-bit word at the lower address */
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}
    737 
/*
 * Program queue idx's descriptor/avail/used ring addresses (1.0
 * layout).  addr == 0 disables the queue and clears its addresses;
 * a non-zero addr enables the queue and latches the notify offset
 * the device assigned to it.  With MSI-X, the queue is also bound
 * to its interrupt vector.
 */
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		/* tear down: disable before clearing the addresses */
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		/* set up: program all addresses before enabling */
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		/* remember the device-assigned notify offset for kick */
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}
    778 
    779 static void
    780 virtio_pci_set_status_10(struct virtio_softc *sc, int status)
    781 {
    782 	struct virtio_pci_softc * const psc = container_of(sc,
    783 	    struct virtio_pci_softc, sc_sc);
    784 	bus_space_tag_t iot = psc->sc_iot;
    785 	bus_space_handle_t ioh = psc->sc_ioh;
    786 	int old = 0;
    787 
    788 	if (status)
    789 		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
    790 	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
    791 	    status | old);
    792 }
    793 
    794 void
    795 virtio_pci_negotiate_features_10(struct virtio_softc *sc,
    796     uint64_t guest_features)
    797 {
    798 	struct virtio_pci_softc * const psc = container_of(sc,
    799 	    struct virtio_pci_softc, sc_sc);
    800 	device_t self = sc->sc_dev;
    801 	bus_space_tag_t iot = psc->sc_iot;
    802 	bus_space_handle_t ioh = psc->sc_ioh;
    803 	uint64_t host, negotiated, device_status;
    804 
    805 	guest_features |= VIRTIO_F_VERSION_1;
    806 	/* notify on empty is 0.9 only */
    807 	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
    808 	sc->sc_active_features = 0;
    809 
    810 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
    811 	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
    812 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
    813 	host |= (uint64_t)bus_space_read_4(iot, ioh,
    814 	    VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
    815 
    816 	negotiated = host & guest_features;
    817 
    818 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
    819 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
    820 	    negotiated & 0xffffffff);
    821 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
    822 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
    823 	    negotiated >> 32);
    824 	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
    825 
    826 	device_status = bus_space_read_1(iot, ioh,
    827 	    VIRTIO_CONFIG1_DEVICE_STATUS);
    828 	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
    829 		aprint_error_dev(self, "feature negotiation failed\n");
    830 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
    831 		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
    832 		return;
    833 	}
    834 
    835 	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
    836 		aprint_error_dev(self, "host rejected version 1\n");
    837 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
    838 		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
    839 		return;
    840 	}
    841 
    842 	sc->sc_active_features = negotiated;
    843 	return;
    844 }
    845 
    846 /* -------------------------------------
    847  * Generic PCI interrupt code
    848  * -------------------------------------*/
    849 
    850 static int
    851 virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
    852 {
    853 	struct virtio_pci_softc * const psc = container_of(sc,
    854 	    struct virtio_pci_softc, sc_sc);
    855 	bus_space_tag_t iot = psc->sc_iot;
    856 	bus_space_handle_t ioh = psc->sc_ioh;
    857 	int vector, ret, qid;
    858 
    859 	if (!virtio_pci_msix_enabled(psc))
    860 		return 0;
    861 
    862 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
    863 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
    864 	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
    865 	if (ret != vector) {
    866 		VIRTIO_PCI_LOG(sc, reinit, "can't set config msix vector\n");
    867 		return -1;
    868 	}
    869 
    870 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
    871 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    872 
    873 		if (psc->sc_intr_pervq)
    874 			vector += qid;
    875 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
    876 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
    877 		    vector);
    878 		ret = bus_space_read_2(iot, ioh,
    879 		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
    880 		if (ret != vector) {
    881 			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
    882 			    "msix vector\n", qid);
    883 			return -1;
    884 		}
    885 	}
    886 
    887 	return 0;
    888 }
    889 
    890 static int
    891 virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
    892 {
    893 	struct virtio_pci_softc * const psc = container_of(sc,
    894 	    struct virtio_pci_softc, sc_sc);
    895 	int offset, vector, ret, qid;
    896 
    897 	if (!virtio_pci_msix_enabled(psc))
    898 		return 0;
    899 
    900 	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
    901 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
    902 
    903 	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
    904 	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
    905 	if (ret != vector) {
    906 		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
    907 		    __func__, vector, ret);
    908 		VIRTIO_PCI_LOG(sc, reinit,
    909 		    "can't set config msix vector\n");
    910 		return -1;
    911 	}
    912 
    913 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
    914 		offset = VIRTIO_CONFIG_QUEUE_SELECT;
    915 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);
    916 
    917 		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
    918 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    919 
    920 		if (psc->sc_intr_pervq)
    921 			vector += qid;
    922 
    923 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
    924 		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
    925 		if (ret != vector) {
    926 			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
    927 			    " expected=%d, actual=%d\n",
    928 			    __func__, qid, vector, ret);
    929 			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
    930 			    "msix vector\n", qid);
    931 			return -1;
    932 		}
    933 	}
    934 
    935 	return 0;
    936 }
    937 
/*
 * Establish the MSI-X interrupt handlers: one for configuration
 * changes, plus either a single shared queue handler or one handler
 * per virtqueue (when sc_intr_pervq is set).  Returns 0 on success,
 * -1 on failure after disestablishing anything partially set up.
 */
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    const struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	/* Configuration-change vector. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self,
		    "couldn't establish MSI-X for config\n");
		goto error;
	}

	/* Queue vector(s), starting right after the config vector. */
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		/* One dedicated handler per virtqueue. */
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc,
			    psc->sc_ihp[n], sc->sc_ipl,
			    vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self,
				    "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		/* One handler shared by all virtqueues. */
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
			pci_intr_setattr(pc, &psc->sc_ihp[idx],
			    PCI_INTR_MPSAFE, true);
		}

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc,
		    psc->sc_ihp[idx], sc->sc_ipl,
		    virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self,
			    "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	/* Report where the config vector landed. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		/*
		 * Spread queue vectors across CPUs.  qid / 2 maps
		 * consecutive queue pairs to the same CPU — presumably
		 * so rx/tx pairs share one; confirm against callers.
		 */
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			/* Affinity is best-effort; report either way. */
			r = interrupt_distribute(psc->sc_ihs[n], affinity,
			    NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s"
				    " affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
		    sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n",
		    intrstr);
	}

	return 0;

error:
	/*
	 * Unwind: disestablish whatever was set up before the failure.
	 * Slots never established are NULL (the ihs array is
	 * zero-allocated by the caller), so each is checked first.
	 */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL) {
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[idx]);
		}
	}

	return -1;
}
   1075 
   1076 static int
   1077 virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
   1078     const struct pci_attach_args *pa)
   1079 {
   1080 	struct virtio_pci_softc * const psc = container_of(sc,
   1081 	    struct virtio_pci_softc, sc_sc);
   1082 	device_t self = sc->sc_dev;
   1083 	pci_chipset_tag_t pc = pa->pa_pc;
   1084 	char intrbuf[PCI_INTRSTR_LEN];
   1085 	char const *intrstr;
   1086 
   1087 	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
   1088 		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);
   1089 
   1090 	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
   1091 	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
   1092 	if (psc->sc_ihs[0] == NULL) {
   1093 		aprint_error_dev(self, "couldn't establish INTx\n");
   1094 		return -1;
   1095 	}
   1096 
   1097 	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf,
   1098 	    sizeof(intrbuf));
   1099 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   1100 
   1101 	return 0;
   1102 }
   1103 
/*
 * Allocate and establish the device's interrupts.  Prefers MSI-X
 * (one config vector plus either one shared queue vector or one per
 * virtqueue) and falls back to INTx if MSI-X cannot be allocated or
 * established.  Returns 0 on success, -1 on failure.
 */
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			/* Enough vectors for config + one per queue. */
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			/* Minimum: config vector + shared queue vector. */
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		/* More than 2 vectors means one per queue was granted. */
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			/* Undo the MSI-X allocation before falling back. */
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		/* Device config window shifts when MSI-X is in use. */
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * Running on INTx: if an MSI-X capability is present,
		 * force it off.  NOTE(review): despite the variable
		 * name, pci_get_capability() returns non-zero when the
		 * capability is FOUND, so this branch is taken on
		 * success — confirm against pci(9).
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	/* Reflect the final per-queue decision back into the flags. */
	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}
   1196 
   1197 static void
   1198 virtio_pci_free_interrupts(struct virtio_softc *sc)
   1199 {
   1200 	struct virtio_pci_softc * const psc = container_of(sc,
   1201 	    struct virtio_pci_softc, sc_sc);
   1202 
   1203 	for (int i = 0; i < psc->sc_ihs_num; i++) {
   1204 		if (psc->sc_ihs[i] == NULL)
   1205 			continue;
   1206 		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
   1207 		psc->sc_ihs[i] = NULL;
   1208 	}
   1209 
   1210 	if (psc->sc_ihs_num > 0) {
   1211 		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp,
   1212 		    psc->sc_ihs_num);
   1213 	}
   1214 
   1215 	if (psc->sc_ihs != NULL) {
   1216 		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
   1217 		psc->sc_ihs = NULL;
   1218 	}
   1219 	psc->sc_ihs_num = 0;
   1220 }
   1221 
   1222 static bool
   1223 virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
   1224 {
   1225 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
   1226 
   1227 	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
   1228 		return true;
   1229 
   1230 	return false;
   1231 }
   1232 
   1233 /*
   1234  * Interrupt handler.
   1235  */
   1236 static int
   1237 virtio_pci_intr(void *arg)
   1238 {
   1239 	struct virtio_softc *sc = arg;
   1240 	struct virtio_pci_softc * const psc = container_of(sc,
   1241 	    struct virtio_pci_softc, sc_sc);
   1242 	int isr, r = 0;
   1243 
   1244 	/* check and ack the interrupt */
   1245 	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
   1246 	if (isr == 0)
   1247 		return 0;
   1248 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
   1249 	    (sc->sc_config_change != NULL))
   1250 		r = (sc->sc_config_change)(sc);
   1251 	if (sc->sc_intrhand != NULL) {
   1252 		if (sc->sc_soft_ih != NULL)
   1253 			softint_schedule(sc->sc_soft_ih);
   1254 		else
   1255 			r |= (sc->sc_intrhand)(sc);
   1256 	}
   1257 
   1258 	return r;
   1259 }
   1260 
   1261 static int
   1262 virtio_pci_msix_queue_intr(void *arg)
   1263 {
   1264 	struct virtio_softc *sc = arg;
   1265 	int r = 0;
   1266 
   1267 	if (sc->sc_intrhand != NULL) {
   1268 		if (sc->sc_soft_ih != NULL)
   1269 			softint_schedule(sc->sc_soft_ih);
   1270 		else
   1271 			r |= (sc->sc_intrhand)(sc);
   1272 	}
   1273 
   1274 	return r;
   1275 }
   1276 
   1277 static int
   1278 virtio_pci_msix_config_intr(void *arg)
   1279 {
   1280 	struct virtio_softc *sc = arg;
   1281 	int r = 0;
   1282 
   1283 	if (sc->sc_config_change != NULL)
   1284 		r = (sc->sc_config_change)(sc);
   1285 	return r;
   1286 }
   1287 
   1288 MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");
   1289 
   1290 #ifdef _MODULE
   1291 #include "ioconf.c"
   1292 #endif
   1293 
   1294 static int
   1295 virtio_pci_modcmd(modcmd_t cmd, void *opaque)
   1296 {
   1297 	int error = 0;
   1298 
   1299 #ifdef _MODULE
   1300 	switch (cmd) {
   1301 	case MODULE_CMD_INIT:
   1302 		error = config_init_component(cfdriver_ioconf_virtio_pci,
   1303 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
   1304 		break;
   1305 	case MODULE_CMD_FINI:
   1306 		error = config_fini_component(cfdriver_ioconf_virtio_pci,
   1307 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
   1308 		break;
   1309 	default:
   1310 		error = ENOTTY;
   1311 		break;
   1312 	}
   1313 #endif
   1314 
   1315 	return error;
   1316 }
   1317