virtio_pci.c revision 1.17
      1 /* $NetBSD: virtio_pci.c,v 1.17 2021/01/21 08:17:13 martin Exp $ */
      2 
      3 /*
      4  * Copyright (c) 2020 The NetBSD Foundation, Inc.
      5  * Copyright (c) 2012 Stefan Fritsch.
      6  * Copyright (c) 2010 Minoura Makoto.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.17 2021/01/21 08:17:13 martin Exp $");
     32 
     33 #include <sys/param.h>
     34 #include <sys/systm.h>
     35 #include <sys/kmem.h>
     36 #include <sys/module.h>
     37 #include <sys/interrupt.h>
     38 
     39 #include <sys/device.h>
     40 
     41 #include <dev/pci/pcidevs.h>
     42 #include <dev/pci/pcireg.h>
     43 #include <dev/pci/pcivar.h>
     44 
     45 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
     46 #include <dev/pci/virtio_pcireg.h>
     47 
     48 #define VIRTIO_PRIVATE
     49 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
     50 
     51 
     52 static int	virtio_pci_match(device_t, cfdata_t, void *);
     53 static void	virtio_pci_attach(device_t, device_t, void *);
     54 static int	virtio_pci_rescan(device_t, const char *, const int *);
     55 static int	virtio_pci_detach(device_t, int);
     56 
     57 struct virtio_pci_softc {
     58 	struct virtio_softc	sc_sc;
     59 
     60 	/* IO space */
     61 	bus_space_tag_t		sc_iot;
     62 	bus_space_handle_t	sc_ioh;
     63 	bus_size_t		sc_iosize;
     64 	bus_size_t		sc_mapped_iosize;
     65 
     66 	/* BARs */
     67 	bus_space_tag_t		sc_bars_iot[4];
     68 	bus_space_handle_t	sc_bars_ioh[4];
     69 	bus_size_t		sc_bars_iosize[4];
     70 
     71 	/* notify space */
     72 	bus_space_tag_t		sc_notify_iot;
     73 	bus_space_handle_t	sc_notify_ioh;
     74 	bus_size_t		sc_notify_iosize;
     75 	uint32_t		sc_notify_off_multiplier;
     76 
     77 	/* isr space */
     78 	bus_space_tag_t		sc_isr_iot;
     79 	bus_space_handle_t	sc_isr_ioh;
     80 	bus_size_t		sc_isr_iosize;
     81 
     82 	/* generic */
     83 	struct pci_attach_args	sc_pa;
     84 	pci_intr_handle_t	*sc_ihp;
     85 	void			**sc_ihs;
     86 	int			sc_ihs_num;
     87 	int			sc_devcfg_offset;	/* for 0.9 */
     88 };
     89 
     90 static int	virtio_pci_attach_09(device_t, void *);
     91 static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
     92 static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
     93 static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
     94 static void	virtio_pci_set_status_09(struct virtio_softc *, int);
     95 static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);
     96 
     97 static int	virtio_pci_attach_10(device_t, void *);
     98 static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
     99 static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
    100 static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
    101 static void	virtio_pci_set_status_10(struct virtio_softc *, int);
    102 static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
    103 static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);
    104 
    105 static uint8_t	virtio_pci_read_device_config_1(struct virtio_softc *, int);
    106 static uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
    107 static uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
    108 static uint64_t	virtio_pci_read_device_config_8(struct virtio_softc *, int);
    109 static void 	virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
    110 static void	virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
    111 static void	virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
    112 static void	virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
    113 
    114 static int	virtio_pci_setup_interrupts(struct virtio_softc *);
    115 static void	virtio_pci_free_interrupts(struct virtio_softc *);
    116 static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
    117 static int	virtio_pci_intr(void *arg);
    118 static int	virtio_pci_msix_queue_intr(void *);
    119 static int	virtio_pci_msix_config_intr(void *);
    120 static int	virtio_pci_setup_msix_vectors_09(struct virtio_softc *);
    121 static int	virtio_pci_setup_msix_vectors_10(struct virtio_softc *);
    122 static int	virtio_pci_setup_msix_interrupts(struct virtio_softc *,
    123 		    struct pci_attach_args *);
    124 static int	virtio_pci_setup_intx_interrupt(struct virtio_softc *,
    125 		    struct pci_attach_args *);
    126 
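/*
 * Fixed MSI-X vector layout used by this driver:
 *   vector 0                 config change interrupt
 *   vector 1                 all queues (when sc_child_mq is not set)
 *   vector 1 + queue index   per-queue vectors (when sc_child_mq is set)
 */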
    127 #define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
    128 #define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
    129 
    130 #if 0
    131 /* we use the legacy virtio spec, so the PCI registers are host native
    132  * byte order, not PCI (i.e. LE) byte order */
    133 #if BYTE_ORDER == BIG_ENDIAN
    134 #define REG_HI_OFF      0
    135 #define REG_LO_OFF      4
    136 #ifndef __BUS_SPACE_HAS_STREAM_METHODS
    137 #define bus_space_read_stream_1 bus_space_read_1
    138 #define bus_space_write_stream_1 bus_space_write_1
    139 static inline uint16_t
    140 bus_space_read_stream_2(bus_space_tag_t t, bus_space_handle_t h,
    141     bus_size_t o)
    142 {
    143 	return le16toh(bus_space_read_2(t, h, o));
    144 }
    145 static inline void
    146 bus_space_write_stream_2(bus_space_tag_t t, bus_space_handle_t h,
    147     bus_size_t o, uint16_t v)
    148 {
    149 	bus_space_write_2(t, h, o, htole16(v));
    150 }
    151 static inline uint32_t
    152 bus_space_read_stream_4(bus_space_tag_t t, bus_space_handle_t h,
    153     bus_size_t o)
    154 {
    155 	return le32toh(bus_space_read_4(t, h, o));
    156 }
    157 static inline void
    158 bus_space_write_stream_4(bus_space_tag_t t, bus_space_handle_t h,
    159     bus_size_t o, uint32_t v)
    160 {
    161 	bus_space_write_4(t, h, o, htole32(v));
    162 }
    163 #endif
    164 #else
    165 #define REG_HI_OFF	4
    166 #define REG_LO_OFF	0
    167 #ifndef __BUS_SPACE_HAS_STREAM_METHODS
    168 #define bus_space_read_stream_1 bus_space_read_1
    169 #define bus_space_read_stream_2 bus_space_read_2
    170 #define bus_space_read_stream_4 bus_space_read_4
    171 #define bus_space_write_stream_1 bus_space_write_1
    172 #define bus_space_write_stream_2 bus_space_write_2
    173 #define bus_space_write_stream_4 bus_space_write_4
    174 #endif
    175 #endif
    176 #endif
    177 
    178 
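/*
 * Whether device config space accesses must be byte swapped: legacy (0.9)
 * config space is in guest/host byte order and is never swapped, while 1.0
 * config space is little endian and therefore swapped on big endian hosts.
 * The chosen value is stored in sc_devcfg_swap at attach time and consulted
 * by the device config read/write helpers below.
 */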
    179 #if BYTE_ORDER == LITTLE_ENDIAN
    180 #	define VIODEVRW_SWAP_09 false
    181 #	define VIODEVRW_SWAP_10 false
    182 #else /* big endian */
    183 #	define VIODEVRW_SWAP_09 false
    184 #	define VIODEVRW_SWAP_10 true
    185 #endif
    186 
    187 
    188 CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    189     virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    190     virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);
    191 
    192 static const struct virtio_ops virtio_pci_ops_09 = {
    193 	.kick = virtio_pci_kick_09,
    194 
    195 	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
    196 	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
    197 	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
    198 	.read_dev_cfg_8 = virtio_pci_read_device_config_8,
    199 	.write_dev_cfg_1 = virtio_pci_write_device_config_1,
    200 	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
    201 	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
    202 	.write_dev_cfg_8 = virtio_pci_write_device_config_8,
    203 
    204 	.read_queue_size = virtio_pci_read_queue_size_09,
    205 	.setup_queue = virtio_pci_setup_queue_09,
    206 	.set_status = virtio_pci_set_status_09,
    207 	.neg_features = virtio_pci_negotiate_features_09,
    208 	.setup_interrupts = virtio_pci_setup_interrupts,
    209 	.free_interrupts = virtio_pci_free_interrupts,
    210 };
    211 
    212 static const struct virtio_ops virtio_pci_ops_10 = {
    213 	.kick = virtio_pci_kick_10,
    214 
    215 	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
    216 	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
    217 	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
    218 	.read_dev_cfg_8 = virtio_pci_read_device_config_8,
    219 	.write_dev_cfg_1 = virtio_pci_write_device_config_1,
    220 	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
    221 	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
    222 	.write_dev_cfg_8 = virtio_pci_write_device_config_8,
    223 
    224 	.read_queue_size = virtio_pci_read_queue_size_10,
    225 	.setup_queue = virtio_pci_setup_queue_10,
    226 	.set_status = virtio_pci_set_status_10,
    227 	.neg_features = virtio_pci_negotiate_features_10,
    228 	.setup_interrupts = virtio_pci_setup_interrupts,
    229 	.free_interrupts = virtio_pci_free_interrupts,
    230 };
    231 
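/*
 * Match both transitional (0.9) and modern (1.0) virtio PCI devices:
 * product IDs 0x1000-0x103f with PCI revision 0 are legacy/transitional
 * devices, product IDs 0x1040-0x107f with PCI revision 1 are 1.0 devices.
 */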
    232 static int
    233 virtio_pci_match(device_t parent, cfdata_t match, void *aux)
    234 {
    235 	struct pci_attach_args *pa;
    236 
    237 	pa = (struct pci_attach_args *)aux;
    238 	switch (PCI_VENDOR(pa->pa_id)) {
    239 	case PCI_VENDOR_QUMRANET:
    240 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
    241 		      PCI_PRODUCT(pa->pa_id)) &&
    242 		     (PCI_PRODUCT(pa->pa_id) <=
    243 		      PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
    244 		      PCI_REVISION(pa->pa_class) == 0)
    245 			return 1;
    246 		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
    247 		      PCI_PRODUCT(pa->pa_id)) &&
    248 		     (PCI_PRODUCT(pa->pa_id) <=
    249 		      PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
    250 		      PCI_REVISION(pa->pa_class) == 1)
    251 			return 1;
    252 		break;
    253 	}
    254 
    255 	return 0;
    256 }
    257 
    258 static void
    259 virtio_pci_attach(device_t parent, device_t self, void *aux)
    260 {
    261 	struct virtio_pci_softc * const psc = device_private(self);
    262 	struct virtio_softc * const sc = &psc->sc_sc;
    263 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
    264 	pci_chipset_tag_t pc = pa->pa_pc;
    265 	pcitag_t tag = pa->pa_tag;
    266 	int revision;
    267 	int ret;
    268 	pcireg_t id;
    269 	pcireg_t csr;
    270 
    271 	revision = PCI_REVISION(pa->pa_class);
    272 	switch (revision) {
    273 	case 0:
    274 		/* legacy (0.9): the subsystem ID identifies the device type */
    275 		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
    276 		break;
    277 	case 1:
    278 		/* 1.0: the device type is encoded in the PCI product ID */
    279 		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
    280 		break;
    281 	default:
    282 		aprint_normal(": unknown revision 0x%02x; giving up\n",
    283 			      revision);
    284 		return;
    285 	}
    286 
    287 	aprint_normal("\n");
    288 	aprint_naive("\n");
    289 	virtio_print_device_type(self, id, revision);
    290 
    291 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
    292 	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
    293 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
    294 
    295 	sc->sc_dev = self;
    296 	psc->sc_pa = *pa;
    297 	psc->sc_iot = pa->pa_iot;
    298 
    299 	sc->sc_dmat = pa->pa_dmat;
    300 	if (pci_dma64_available(pa))
    301 		sc->sc_dmat = pa->pa_dmat64;
    302 
    303 	/* attach is dependent on revision */
    304 	ret = 0;
    305 	if (revision == 1) {
    306 		/* try to attach 1.0 */
    307 		ret = virtio_pci_attach_10(self, aux);
    308 	}
    309 	if (ret == 0 && revision == 0) {
    310 		/* revision 0 devices are 0.9 only, or transitional (0.9 and 1.0) */
    311 		ret = virtio_pci_attach_09(self, aux);
    312 	}
    313 	if (ret) {
    314 		aprint_error_dev(self, "cannot attach (%d)\n", ret);
    315 		return;
    316 	}
    317 	KASSERT(sc->sc_ops);
    318 
    319 	/* preset config region */
    320 	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
    321 	if (virtio_pci_adjust_config_region(psc))
    322 		return;
    323 
    324 	/* generic */
    325 	virtio_device_reset(sc);
    326 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
    327 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
    328 
    329 	sc->sc_childdevid = id;
    330 	sc->sc_child = NULL;
    331 	virtio_pci_rescan(self, "virtio", 0);
    332 	return;
    333 }
    334 
    335 /* ARGSUSED */
    336 static int
    337 virtio_pci_rescan(device_t self, const char *attr, const int *scan_flags)
    338 {
    339 	struct virtio_pci_softc * const psc = device_private(self);
    340 	struct virtio_softc * const sc = &psc->sc_sc;
    341 	struct virtio_attach_args va;
    342 
    343 	if (sc->sc_child)	/* Child already attached? */
    344 		return 0;
    345 
    346 	memset(&va, 0, sizeof(va));
    347 	va.sc_childdevid = sc->sc_childdevid;
    348 
    349 	config_found_ia(self, attr, &va, NULL);
    350 
    351 	if (virtio_attach_failed(sc))
    352 		return 0;
    353 
    354 	return 0;
    355 }
    356 
    357 
    358 static int
    359 virtio_pci_detach(device_t self, int flags)
    360 {
    361 	struct virtio_pci_softc * const psc = device_private(self);
    362 	struct virtio_softc * const sc = &psc->sc_sc;
    363 	int r;
    364 
    365 	if (sc->sc_child != NULL) {
    366 		r = config_detach(sc->sc_child, flags);
    367 		if (r)
    368 			return r;
    369 	}
    370 
    371 	/* Check that child detached properly */
    372 	KASSERT(sc->sc_child == NULL);
    373 	KASSERT(sc->sc_vqs == NULL);
    374 	KASSERT(psc->sc_ihs_num == 0);
    375 
    376 	if (psc->sc_iosize)
    377 		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
    378 			psc->sc_mapped_iosize);
    379 	psc->sc_iosize = 0;
    380 
    381 	return 0;
    382 }
    383 
    384 
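/*
 * 0.9 (legacy) attach: the device exposes a single I/O BAR containing the
 * common configuration registers, the queue notify register and the ISR
 * status register.  The notify and ISR registers are carved out as
 * subregions so that the 0.9 and 1.0 code paths can share the same handles.
 */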
    385 static int
    386 virtio_pci_attach_09(device_t self, void *aux)
    387 	//struct virtio_pci_softc *psc, struct pci_attach_args *pa)
    388 {
    389 	struct virtio_pci_softc * const psc = device_private(self);
    390 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
    391 	struct virtio_softc * const sc = &psc->sc_sc;
    392 //	pci_chipset_tag_t pc = pa->pa_pc;
    393 //	pcitag_t tag = pa->pa_tag;
    394 
    395 	/* map the complete legacy I/O region */
    396 	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
    397 			   &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
    398 		aprint_error_dev(self, "can't map i/o space\n");
    399 		return EIO;
    400 	}
    401 	psc->sc_mapped_iosize = psc->sc_iosize;
    402 
    403 	/* notify space */
    404 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
    405 			VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
    406 		aprint_error_dev(self, "can't map notify i/o space\n");
    407 		return EIO;
    408 	}
    409 	psc->sc_notify_iosize = 2;
    410 	psc->sc_notify_iot = psc->sc_iot;
    411 
    412 	/* ISR space */
    413 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
    414 			VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
    415 		aprint_error_dev(self, "can't map isr i/o space\n");
    416 		return EIO;
    417 	}
    418 	psc->sc_isr_iosize = 1;
    419 	psc->sc_isr_iot = psc->sc_iot;
    420 
    421 	/* set our version 0.9 ops */
    422 	sc->sc_ops = &virtio_pci_ops_09;
    423 	sc->sc_devcfg_swap = VIODEVRW_SWAP_09;
    424 	return 0;
    425 }
    426 
    427 
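/*
 * 1.0 (modern) attach: locate the vendor-specific PCI capabilities that
 * describe where the common, notify, ISR and (optional) device config
 * structures live, map only the BARs they reference, and create a
 * subregion for each structure.
 */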
    428 #define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
    429 				sizeof(pcireg_t))
    430 static int
    431 virtio_pci_attach_10(device_t self, void *aux)
    432 {
    433 	struct virtio_pci_softc * const psc = device_private(self);
    434 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
    435 	struct virtio_softc * const sc = &psc->sc_sc;
    436 	pci_chipset_tag_t pc = pa->pa_pc;
    437 	pcitag_t tag = pa->pa_tag;
    438 
    439 	struct virtio_pci_cap common, isr, device;
    440 	struct virtio_pci_notify_cap notify;
    441 	int have_device_cfg = 0;
    442 	bus_size_t bars[NMAPREG] = { 0 };
    443 	int bars_idx[NMAPREG] = { 0 };
    444 	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
    445 	int i, j = 0, ret = 0;
    446 
    447 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
    448 			&common, sizeof(common)))
    449 		return ENODEV;
    450 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
    451 			&notify, sizeof(notify)))
    452 		return ENODEV;
    453 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
    454 			&isr, sizeof(isr)))
    455 		return ENODEV;
    456 	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
    457 			&device, sizeof(device)))
    458 		memset(&device, 0, sizeof(device));
    459 	else
    460 		have_device_cfg = 1;
    461 
    462 	/*
    463 	 * XXX Maybe there are devices that offer the pci caps but not the
    464 	 * XXX VERSION_1 feature bit? Then we should check the feature bit
    465 	 * XXX here and fall back to 0.9 if it is not present.
    466 	 */
    467 
    468 	/* Figure out which bars we need to map */
    469 	for (i = 0; i < __arraycount(caps); i++) {
    470 		int bar = caps[i]->bar;
    471 		bus_size_t len = caps[i]->offset + caps[i]->length;
    472 		if (caps[i]->length == 0)
    473 			continue;
    474 		if (bars[bar] < len)
    475 			bars[bar] = len;
    476 	}
    477 
    478 	for (i = 0; i < __arraycount(bars); i++) {
    479 		int reg;
    480 		pcireg_t type;
    481 		if (bars[i] == 0)
    482 			continue;
    483 		reg = PCI_MAPREG_START + i * 4;
    484 		type = pci_mapreg_type(pc, tag, reg);
    485 		if (pci_mapreg_map(pa, reg, type, 0,
    486 				&psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
    487 				NULL, &psc->sc_bars_iosize[j])) {
    488 			aprint_error_dev(self, "can't map bar %u\n", i);
    489 			ret = EIO;
    490 			goto err;
    491 		}
    492 		aprint_debug_dev(self,
    493 		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
    494 		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
    495 		bars_idx[i] = j;
    496 		j++;
    497 	}
    498 
    499 	i = bars_idx[notify.cap.bar];
    500 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    501 			notify.cap.offset, notify.cap.length,
    502 			&psc->sc_notify_ioh)) {
    503 		aprint_error_dev(self, "can't map notify i/o space\n");
    504 		ret = EIO;
    505 		goto err;
    506 	}
    507 	psc->sc_notify_iosize = notify.cap.length;
    508 	psc->sc_notify_iot = psc->sc_bars_iot[i];
    509 	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);
    510 
    511 	if (have_device_cfg) {
    512 		i = bars_idx[device.bar];
    513 		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    514 				device.offset, device.length,
    515 				&sc->sc_devcfg_ioh)) {
    516 			aprint_error_dev(self, "can't map devcfg i/o space\n");
    517 			ret = EIO;
    518 			goto err;
    519 		}
    520 		aprint_debug_dev(self,
    521 			"device.offset = 0x%x, device.length = 0x%x\n",
    522 			device.offset, device.length);
    523 		sc->sc_devcfg_iosize = device.length;
    524 		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
    525 	}
    526 
    527 	i = bars_idx[isr.bar];
    528 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    529 			isr.offset, isr.length, &psc->sc_isr_ioh)) {
    530 		aprint_error_dev(self, "can't map isr i/o space\n");
    531 		ret = EIO;
    532 		goto err;
    533 	}
    534 	psc->sc_isr_iosize = isr.length;
    535 	psc->sc_isr_iot = psc->sc_bars_iot[i];
    536 
    537 	i = bars_idx[common.bar];
    538 	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
    539 			common.offset, common.length, &psc->sc_ioh)) {
    540 		aprint_error_dev(self, "can't map common i/o space\n");
    541 		ret = EIO;
    542 		goto err;
    543 	}
    544 	psc->sc_iosize = common.length;
    545 	psc->sc_iot = psc->sc_bars_iot[i];
    546 	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];
    547 
    548 	psc->sc_sc.sc_version_1 = 1;
    549 
    550 	/* set our version 1.0 ops */
    551 	sc->sc_ops = &virtio_pci_ops_10;
    552 	sc->sc_devcfg_swap = VIODEVRW_SWAP_10;
    553 	return 0;
    554 
    555 err:
    556 	/* there is no pci_mapreg_unmap() */
    557 	return ret;
    558 }
    559 
    560 /* v1.0 attach helper: find the vendor capability with the given cfg_type */
    561 static int
    562 virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
    563 {
    564 	device_t self = psc->sc_sc.sc_dev;
    565 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
    566 	pcitag_t tag = psc->sc_pa.pa_tag;
    567 	unsigned int offset, i, len;
    568 	union {
    569 		pcireg_t reg[8];
    570 		struct virtio_pci_cap vcap;
    571 	} *v = buf;
    572 
    573 	if (buflen < sizeof(struct virtio_pci_cap))
    574 		return ERANGE;
    575 
    576 	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
    577 		return ENOENT;
    578 
    579 	do {
    580 		for (i = 0; i < 4; i++)
    581 			v->reg[i] =
    582 				le32toh(pci_conf_read(pc, tag, offset + i * 4));
    583 		if (v->vcap.cfg_type == cfg_type)
    584 			break;
    585 		offset = v->vcap.cap_next;
    586 	} while (offset != 0);
    587 
    588 	if (offset == 0)
    589 		return ENOENT;
    590 
    591 	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
    592 		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
    593 		if (len > buflen) {
    594 			aprint_error_dev(self, "%s cap too large\n", __func__);
    595 			return ERANGE;
    596 		}
    597 		for (i = 4; i < len / sizeof(pcireg_t);  i++)
    598 			v->reg[i] =
    599 				le32toh(pci_conf_read(pc, tag, offset + i * 4));
    600 	}
    601 
    602 	/* endian fixup */
    603 	v->vcap.offset = le32toh(v->vcap.offset);
    604 	v->vcap.length = le32toh(v->vcap.length);
    605 	return 0;
    606 }
    607 
    608 
    609 /* -------------------------------------
    610  * Version 0.9 support
    611  * -------------------------------------*/
    612 
    613 static void
    614 virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
    615 {
    616 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    617 
    618 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
    619 }
    620 
    621 /* only meaningful for v0.9, but also called for 1.0 where it returns early */
    622 static int
    623 virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
    624 {
    625 	struct virtio_softc * const sc = (struct virtio_softc *) psc;
    626 	device_t self = psc->sc_sc.sc_dev;
    627 
    628 	if (psc->sc_sc.sc_version_1)
    629 		return 0;
    630 
    631 	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
    632 	sc->sc_devcfg_iot = psc->sc_iot;
    633 	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
    634 			psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
    635 			&sc->sc_devcfg_ioh)) {
    636 		aprint_error_dev(self, "can't map config i/o space\n");
    637 		return EIO;
    638 	}
    639 
    640 	return 0;
    641 }
    642 
    643 static uint16_t
    644 virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
    645 {
    646 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    647 
    648 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    649 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
    650 	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
    651 	    VIRTIO_CONFIG_QUEUE_SIZE);
    652 }
    653 
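/*
 * Legacy queue setup: select the queue, then hand the device the page
 * frame number of the ring.  With MSI-X enabled, also program the queue's
 * interrupt vector.
 */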
    654 static void
    655 virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
    656 {
    657 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    658 
    659 	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    660 	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
    661 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
    662 	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
    663 
    664 	if (psc->sc_ihs_num > 1) {
    665 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    666 		if (sc->sc_child_mq)
    667 			vec += idx;
    668 		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
    669 		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
    670 	}
    671 }
    672 
    673 static void
    674 virtio_pci_set_status_09(struct virtio_softc *sc, int status)
    675 {
    676 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    677 	int old = 0;
    678 
    679 	if (status != 0) {
    680 	    old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
    681 		VIRTIO_CONFIG_DEVICE_STATUS);
    682 	}
    683 	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
    684 	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
    685 }
    686 
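/*
 * Legacy feature negotiation is limited to 32 feature bits: read the
 * device features, mask them with what the guest supports, write the
 * result back and remember it in sc_active_features.
 */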
    687 static void
    688 virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
    689 {
    690 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    691 	uint32_t r;
    692 
    693 	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
    694 	    VIRTIO_CONFIG_DEVICE_FEATURES);
    695 
    696 	r &= guest_features;
    697 
    698 	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
    699 	    VIRTIO_CONFIG_GUEST_FEATURES, r);
    700 
    701 	sc->sc_active_features = r;
    702 }
    703 
    704 /* -------------------------------------
    705  * Version 1.0 support
    706  * -------------------------------------*/
    707 
    708 static void
    709 virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
    710 {
    711 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    712 	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
    713 		psc->sc_notify_off_multiplier;
    714 
    715 	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
    716 }
    717 
    718 
    719 static uint16_t
    720 virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
    721 {
    722 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    723 	bus_space_tag_t	   iot = psc->sc_iot;
    724 	bus_space_handle_t ioh = psc->sc_ioh;
    725 
    726 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
    727 	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
    728 }
    729 
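/*
 * 1.0 queue setup: program the descriptor, available and used ring
 * addresses separately, enable the queue and cache its notify offset for
 * virtio_pci_kick_10().  An address of 0 disables the queue.  With MSI-X
 * enabled, also program the queue's interrupt vector.
 */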
    730 static void
    731 virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
    732 {
    733 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    734 	struct virtqueue *vq = &sc->sc_vqs[idx];
    735 	bus_space_tag_t	   iot = psc->sc_iot;
    736 	bus_space_handle_t ioh = psc->sc_ioh;
    737 	KASSERT(vq->vq_index == idx);
    738 
    739 	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
    740 	if (addr == 0) {
    741 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
    742 		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_DESC,   0);
    743 		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_AVAIL,  0);
    744 		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_USED,   0);
    745 	} else {
    746 		bus_space_write_8(iot, ioh,
    747 			VIRTIO_CONFIG1_QUEUE_DESC, addr);
    748 		bus_space_write_8(iot, ioh,
    749 			VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
    750 		bus_space_write_8(iot, ioh,
    751 			VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
    752 		bus_space_write_2(iot, ioh,
    753 			VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
    754 		vq->vq_notify_off = bus_space_read_2(iot, ioh,
    755 			VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
    756 	}
    757 
    758 	if (psc->sc_ihs_num > 1) {
    759 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
    760 		if (sc->sc_child_mq)
    761 			vec += idx;
    762 		bus_space_write_2(iot, ioh,
    763 			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
    764 	}
    765 }
    766 
    767 static void
    768 virtio_pci_set_status_10(struct virtio_softc *sc, int status)
    769 {
    770 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    771 	bus_space_tag_t	   iot = psc->sc_iot;
    772 	bus_space_handle_t ioh = psc->sc_ioh;
    773 	int old = 0;
    774 
    775 	if (status)
    776 		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
    777 	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
    778 }
    779 
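/*
 * 1.0 feature negotiation: features are 64 bits wide and accessed 32 bits
 * at a time through a select register.  VERSION_1 is always requested and
 * NOTIFY_ON_EMPTY (a 0.9-only feature) is always stripped.  After the
 * driver features are written, FEATURES_OK is set and read back to verify
 * that the device accepted the selection.
 */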
    780 static void
    781 virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
    782 {
    783 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    784 	device_t self          =  sc->sc_dev;
    785 	bus_space_tag_t	   iot = psc->sc_iot;
    786 	bus_space_handle_t ioh = psc->sc_ioh;
    787 	uint64_t host, negotiated, device_status;
    788 
    789 	guest_features |= VIRTIO_F_VERSION_1;
    790 	/* notify on empty is 0.9 only */
    791 	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
    792 	sc->sc_active_features = 0;
    793 
    794 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
    795 	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
    796 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
    797 	host |= (uint64_t)
    798 		bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
    799 
    800 	negotiated = host & guest_features;
    801 
    802 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
    803 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
    804 			negotiated & 0xffffffff);
    805 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
    806 	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
    807 			negotiated >> 32);
    808 	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
    809 
    810 	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
    811 	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
    812 		aprint_error_dev(self, "feature negotiation failed\n");
    813 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
    814 				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
    815 		return;
    816 	}
    817 
    818 	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
    819 		aprint_error_dev(self, "host rejected version 1\n");
    820 		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
    821 				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
    822 		return;
    823 	}
    824 
    825 	sc->sc_active_features = negotiated;
    826 	return;
    827 }
    828 
    829 /* -------------------------------------
    830  * Read/write device config code
    831  * -------------------------------------*/
    832 
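/*
 * Device config space accessors.  Multi-byte fields are converted to host
 * byte order according to sc_devcfg_swap (see VIODEVRW_SWAP_* above); the
 * aarch64 big-endian builds use the non-stream bus_space methods instead.
 */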
    833 static uint8_t
    834 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
    835 {
    836 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    837 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    838 
    839 	return bus_space_read_1(iot, ioh, index);
    840 }
    841 
    842 static uint16_t
    843 virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
    844 {
    845 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    846 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    847 	uint16_t val;
    848 
    849 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
    850 	val = bus_space_read_2(iot, ioh, index);
    851 	return val;
    852 #else
    853 	val = bus_space_read_stream_2(iot, ioh, index);
    854 	if (vsc->sc_devcfg_swap)
    855 		return bswap16(val);
    856 	return val;
    857 #endif
    858 }
    859 
    860 static uint32_t
    861 virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
    862 {
    863 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    864 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    865 	uint32_t val;
    866 
    867 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
    868 	val = bus_space_read_4(iot, ioh, index);
    869 	return val;
    870 #else
    871 	val = bus_space_read_stream_4(iot, ioh, index);
    872 	if (vsc->sc_devcfg_swap)
    873 		return bswap32(val);
    874 	return val;
    875 #endif
    876 }
    877 
    878 static uint64_t
    879 virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
    880 {
    881 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    882 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    883 	uint64_t val, val_h, val_l;
    884 
    885 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
    886 	if (vsc->sc_devcfg_swap) {
    887 		val_l = bus_space_read_4(iot, ioh, index);
    888 		val_h = bus_space_read_4(iot, ioh, index + 4);
    889 	} else {
    890 		val_h = bus_space_read_4(iot, ioh, index);
    891 		val_l = bus_space_read_4(iot, ioh, index + 4);
    892 	}
    893 	val = val_h << 32;
    894 	val |= val_l;
    895 	return val;
    896 #elif BYTE_ORDER == BIG_ENDIAN
    897 	val_h = bus_space_read_stream_4(iot, ioh, index);
    898 	val_l = bus_space_read_stream_4(iot, ioh, index + 4);
    899 	val = val_h << 32;
    900 	val |= val_l;
    901 	if (vsc->sc_devcfg_swap)
    902 		return bswap64(val);
    903 	return val;
    904 #else
    905 	val_l = bus_space_read_4(iot, ioh, index);
    906 	val_h = bus_space_read_4(iot, ioh, index + 4);
    907 	val = val_h << 32;
    908 	val |= val_l;
    909 
    910 	return val;
    911 #endif
    912 }
    913 
    914 static void
    915 virtio_pci_write_device_config_1(struct virtio_softc *vsc,
    916 			     int index, uint8_t value)
    917 {
    918 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    919 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    920 
    921 	bus_space_write_1(iot, ioh, index, value);
    922 }
    923 
    924 static void
    925 virtio_pci_write_device_config_2(struct virtio_softc *vsc,
    926 			     int index, uint16_t value)
    927 {
    928 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    929 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    930 
    931 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
    932 	bus_space_write_2(iot, ioh, index, value);
    933 #else
    934 	if (vsc->sc_devcfg_swap)
    935 		value = bswap16(value);
    936 	bus_space_write_stream_2(iot, ioh, index, value);
    937 #endif
    938 }
    939 
    940 static void
    941 virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    942 			     int index, uint32_t value)
    943 {
    944 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    945 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    946 
    947 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
    948 	bus_space_write_4(iot, ioh, index, value);
    949 #else
    950 	if (vsc->sc_devcfg_swap)
    951 		value = bswap32(value);
    952 	bus_space_write_stream_4(iot, ioh, index, value);
    953 #endif
    954 }
    955 
    956 static void
    957 virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    958 			     int index, uint64_t value)
    959 {
    960 	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
    961 	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
    962 	uint64_t val_h, val_l;
    963 
    964 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
    965 	val_l = value & 0xffffffff;
    966 	val_h = value >> 32;
    967 	if (vsc->sc_devcfg_swap) {
    968 		bus_space_write_4(iot, ioh, index, val_l);
    969 		bus_space_write_4(iot, ioh, index + 4, val_h);
    970 	} else {
    971 		bus_space_write_4(iot, ioh, index, val_h);
    972 		bus_space_write_4(iot, ioh, index + 4, val_l);
    973 	}
    974 #elif BYTE_ORDER == BIG_ENDIAN
    975 	if (vsc->sc_devcfg_swap)
    976 		value = bswap64(value);
    977 	val_l = value & 0xffffffff;
    978 	val_h = value >> 32;
    979 
    980 	bus_space_write_stream_4(iot, ioh, index, val_h);
    981 	bus_space_write_stream_4(iot, ioh, index + 4, val_l);
    982 #else
    983 	val_l = value & 0xffffffff;
    984 	val_h = value >> 32;
    985 	bus_space_write_stream_4(iot, ioh, index, val_l);
    986 	bus_space_write_stream_4(iot, ioh, index + 4, val_h);
    987 #endif
    988 }
    989 
    990 /* -------------------------------------
    991  * Generic PCI interrupt code
    992  * -------------------------------------*/
    993 
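/*
 * Program the MSI-X vector indices into the device: the config vector plus
 * either a single shared queue vector or one vector per queue.  Each write
 * is read back; if the device could not accept the vector the readback
 * differs and the setup fails.
 */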
    994 static int
    995 virtio_pci_setup_msix_vectors_10(struct virtio_softc *sc)
    996 {
    997 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
    998 	device_t self          =  sc->sc_dev;
    999 	bus_space_tag_t	   iot = psc->sc_iot;
   1000 	bus_space_handle_t ioh = psc->sc_ioh;
   1001 	int vector, ret, qid;
   1002 
   1003 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
   1004 	bus_space_write_2(iot, ioh,
   1005 		VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
   1006 	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
   1007 	if (ret != vector) {
   1008 		aprint_error_dev(self, "can't set config msix vector\n");
   1009 		return -1;
   1010 	}
   1011 
   1012 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
   1013 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1014 
   1015 		if (sc->sc_child_mq)
   1016 			vector += qid;
   1017 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
   1018 		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
   1019 			vector);
   1020 		ret = bus_space_read_2(iot, ioh,
   1021 			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
   1022 		if (ret != vector) {
   1023 			aprint_error_dev(self, "can't set queue %d "
   1024 				"msix vector\n", qid);
   1025 			return -1;
   1026 		}
   1027 	}
   1028 
   1029 	return 0;
   1030 }
   1031 
   1032 static int
   1033 virtio_pci_setup_msix_vectors_09(struct virtio_softc *sc)
   1034 {
   1035 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1036 	device_t self = sc->sc_dev;
   1037 	int offset, vector, ret, qid;
   1038 
   1039 	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
   1040 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
   1041 
   1042 	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
   1043 	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
   1044 	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
   1045 	    vector, ret);
   1046 	if (ret != vector) {
   1047 		aprint_error_dev(self, "can't set config msix vector\n");
   1048 		return -1;
   1049 	}
   1050 
   1051 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
   1052 		offset = VIRTIO_CONFIG_QUEUE_SELECT;
   1053 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);
   1054 
   1055 		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
   1056 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1057 
   1058 		if (sc->sc_child_mq)
   1059 			vector += qid;
   1060 
   1061 		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
   1062 		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
   1063 		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
   1064 		    vector, ret);
   1065 		if (ret != vector) {
   1066 			aprint_error_dev(self, "can't set queue %d "
   1067 				"msix vector\n", qid);
   1068 			return -1;
   1069 		}
   1070 	}
   1071 
   1072 	return 0;
   1073 }
   1074 
   1075 static int
   1076 virtio_pci_setup_msix_interrupts(struct virtio_softc *sc,
   1077     struct pci_attach_args *pa)
   1078 {
   1079 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1080 	device_t self = sc->sc_dev;
   1081 	pci_chipset_tag_t pc = pa->pa_pc;
   1082 	struct virtqueue *vq;
   1083 	char intrbuf[PCI_INTRSTR_LEN];
   1084 	char intr_xname[INTRDEVNAMEBUF];
   1085 	char const *intrstr;
   1086 	int idx, qid, n;
   1087 	int ret;
   1088 
   1089 	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
   1090 	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
   1091 		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);
   1092 
   1093 	snprintf(intr_xname, sizeof(intr_xname), "%s config",
   1094 	    device_xname(sc->sc_dev));
   1095 
   1096 	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
   1097 	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
   1098 	if (psc->sc_ihs[idx] == NULL) {
   1099 		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
   1100 		goto error;
   1101 	}
   1102 
   1103 	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1104 	if (sc->sc_child_mq) {
   1105 		for (qid = 0; qid < sc->sc_nvqs; qid++) {
   1106 			n = idx + qid;
   1107 			vq = &sc->sc_vqs[qid];
   1108 
   1109 			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
   1110 			    device_xname(sc->sc_dev), qid);
   1111 
   1112 			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
   1113 				pci_intr_setattr(pc, &psc->sc_ihp[n],
   1114 				    PCI_INTR_MPSAFE, true);
   1115 			}
   1116 
   1117 			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
   1118 			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
   1119 			if (psc->sc_ihs[n] == NULL) {
   1120 				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
   1121 				goto error;
   1122 			}
   1123 		}
   1124 	} else {
   1125 		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
   1126 			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);
   1127 
   1128 		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
   1129 		    device_xname(sc->sc_dev));
   1130 		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
   1131 		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
   1132 		if (psc->sc_ihs[idx] == NULL) {
   1133 			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
   1134 			goto error;
   1135 		}
   1136 	}
   1137 
   1138 	if (sc->sc_version_1) {
   1139 		ret = virtio_pci_setup_msix_vectors_10(sc);
   1140 	} else {
   1141 		ret = virtio_pci_setup_msix_vectors_09(sc);
   1142 	}
   1143 	if (ret) {
   1144 		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
   1145 		goto error;
   1146 	}
   1147 
   1148 	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
   1149 	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
   1150 	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
   1151 	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1152 	if (sc->sc_child_mq) {
   1153 		kcpuset_t *affinity;
   1154 		int affinity_to, r;
   1155 
   1156 		kcpuset_create(&affinity, false);
   1157 
   1158 		for (qid = 0; qid < sc->sc_nvqs; qid++) {
   1159 			n = idx + qid;
   1160 			affinity_to = (qid / 2) % ncpu;
   1161 
   1162 			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
   1163 			    intrbuf, sizeof(intrbuf));
   1164 
   1165 			kcpuset_zero(affinity);
   1166 			kcpuset_set(affinity, affinity_to);
   1167 			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
   1168 			if (r == 0) {
   1169 				aprint_normal_dev(self,
   1170 				    "for vq #%d interrupting at %s affinity to %u\n",
   1171 				    qid, intrstr, affinity_to);
   1172 			} else {
   1173 				aprint_normal_dev(self,
   1174 				    "for vq #%d interrupting at %s\n",
   1175 				    qid, intrstr);
   1176 			}
   1177 		}
   1178 
   1179 		kcpuset_destroy(affinity);
   1180 	} else {
   1181 		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
   1182 		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
   1183 	}
   1184 
   1185 	return 0;
   1186 
   1187 error:
   1188 	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
   1189 	if (psc->sc_ihs[idx] != NULL)
   1190 		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
   1191 	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1192 	if (sc->sc_child_mq) {
   1193 		for (qid = 0; qid < sc->sc_nvqs; qid++) {
   1194 			n = idx + qid;
   1195 			if (psc->sc_ihs[n] == NULL)
   1196 				continue;
   1197 			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
   1198 		}
   1199 
   1200 	} else {
   1201 		if (psc->sc_ihs[idx] != NULL)
   1202 			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
   1203 	}
   1204 
   1205 	return -1;
   1206 }
   1207 
   1208 static int
   1209 virtio_pci_setup_intx_interrupt(struct virtio_softc *sc,
   1210     struct pci_attach_args *pa)
   1211 {
   1212 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1213 	device_t self = sc->sc_dev;
   1214 	pci_chipset_tag_t pc = pa->pa_pc;
   1215 	char intrbuf[PCI_INTRSTR_LEN];
   1216 	char const *intrstr;
   1217 
   1218 	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
   1219 		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);
   1220 
   1221 	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
   1222 	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
   1223 	if (psc->sc_ihs[0] == NULL) {
   1224 		aprint_error_dev(self, "couldn't establish INTx\n");
   1225 		return -1;
   1226 	}
   1227 
   1228 	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
   1229 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   1230 
   1231 	return 0;
   1232 }
   1233 
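/*
 * Allocate and establish interrupts: prefer MSI-X with one vector for
 * config changes plus either a single shared queue vector or one vector
 * per queue (sc_child_mq), falling back to INTx if MSI-X is unavailable
 * or cannot be set up.  The device config window offset differs depending
 * on whether MSI is in use, so the 0.9 config region is re-adjusted here.
 */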
   1234 static int
   1235 virtio_pci_setup_interrupts(struct virtio_softc *sc)
   1236 {
   1237 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1238 	device_t self = sc->sc_dev;
   1239 	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
   1240 	pcitag_t tag = psc->sc_pa.pa_tag;
   1241 	int error;
   1242 	int nmsix;
   1243 	int off;
   1244 	int counts[PCI_INTR_TYPE_SIZE];
   1245 	pci_intr_type_t max_type;
   1246 	pcireg_t ctl;
   1247 
   1248 	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
   1249 	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);
   1250 
   1251 	/* We need at least two: one for config and the other for queues */
   1252 	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
   1253 		/* Try INTx only */
   1254 		max_type = PCI_INTR_TYPE_INTX;
   1255 		counts[PCI_INTR_TYPE_INTX] = 1;
   1256 	} else {
   1257 		/* Try MSI-X first and INTx second */
   1258 		if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
   1259 			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
   1260 		} else {
   1261 			sc->sc_child_mq = false;
   1262 		}
   1263 
   1264 		if (sc->sc_child_mq == false) {
   1265 			nmsix = 2;
   1266 		}
   1267 
   1268 		max_type = PCI_INTR_TYPE_MSIX;
   1269 		counts[PCI_INTR_TYPE_MSIX] = nmsix;
   1270 		counts[PCI_INTR_TYPE_MSI] = 0;
   1271 		counts[PCI_INTR_TYPE_INTX] = 1;
   1272 	}
   1273 
   1274 retry:
   1275 	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
   1276 	if (error != 0) {
   1277 		aprint_error_dev(self, "couldn't map interrupt\n");
   1278 		return -1;
   1279 	}
   1280 
   1281 	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
   1282 		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
   1283 		    KM_SLEEP);
   1284 
   1285 		error = virtio_pci_setup_msix_interrupts(sc, &psc->sc_pa);
   1286 		if (error != 0) {
   1287 			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
   1288 			pci_intr_release(pc, psc->sc_ihp, nmsix);
   1289 
   1290 			/* Retry INTx */
   1291 			max_type = PCI_INTR_TYPE_INTX;
   1292 			counts[PCI_INTR_TYPE_INTX] = 1;
   1293 			goto retry;
   1294 		}
   1295 
   1296 		psc->sc_ihs_num = nmsix;
   1297 		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
   1298 		virtio_pci_adjust_config_region(psc);
   1299 	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
   1300 		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
   1301 		    KM_SLEEP);
   1302 
   1303 		error = virtio_pci_setup_intx_interrupt(sc, &psc->sc_pa);
   1304 		if (error != 0) {
   1305 			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
   1306 			pci_intr_release(pc, psc->sc_ihp, 1);
   1307 			return -1;
   1308 		}
   1309 
   1310 		psc->sc_ihs_num = 1;
   1311 		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
   1312 		virtio_pci_adjust_config_region(psc);
   1313 
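		/*
		 * pci_get_capability() returns non-zero when the MSI-X
		 * capability is present; in that case make sure MSI-X is
		 * explicitly disabled now that INTx is in use.
		 */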
   1314 		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
   1315 		if (error != 0) {
   1316 			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
   1317 			ctl &= ~PCI_MSIX_CTL_ENABLE;
   1318 			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
   1319 		}
   1320 	}
   1321 
   1322 	return 0;
   1323 }
   1324 
   1325 static void
   1326 virtio_pci_free_interrupts(struct virtio_softc *sc)
   1327 {
   1328 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1329 
   1330 	for (int i = 0; i < psc->sc_ihs_num; i++) {
   1331 		if (psc->sc_ihs[i] == NULL)
   1332 			continue;
   1333 		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
   1334 		psc->sc_ihs[i] = NULL;
   1335 	}
   1336 
   1337 	if (psc->sc_ihs_num > 0)
   1338 		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);
   1339 
   1340 	if (psc->sc_ihs != NULL) {
   1341 		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
   1342 		psc->sc_ihs = NULL;
   1343 	}
   1344 	psc->sc_ihs_num = 0;
   1345 }
   1346 
   1347 /*
   1348  * INTx interrupt handler.
   1349  */
   1350 static int
   1351 virtio_pci_intr(void *arg)
   1352 {
   1353 	struct virtio_softc *sc = arg;
   1354 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
   1355 	int isr, r = 0;
   1356 
   1357 	/* check and ack the interrupt */
   1358 	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
   1359 	if (isr == 0)
   1360 		return 0;
   1361 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
   1362 	    (sc->sc_config_change != NULL))
   1363 		r = (sc->sc_config_change)(sc);
   1364 	if (sc->sc_intrhand != NULL) {
   1365 		if (sc->sc_soft_ih != NULL)
   1366 			softint_schedule(sc->sc_soft_ih);
   1367 		else
   1368 			r |= (sc->sc_intrhand)(sc);
   1369 	}
   1370 
   1371 	return r;
   1372 }
   1373 
   1374 static int
   1375 virtio_pci_msix_queue_intr(void *arg)
   1376 {
   1377 	struct virtio_softc *sc = arg;
   1378 	int r = 0;
   1379 
   1380 	if (sc->sc_intrhand != NULL) {
   1381 		if (sc->sc_soft_ih != NULL)
   1382 			softint_schedule(sc->sc_soft_ih);
   1383 		else
   1384 			r |= (sc->sc_intrhand)(sc);
   1385 	}
   1386 
   1387 	return r;
   1388 }
   1389 
   1390 static int
   1391 virtio_pci_msix_config_intr(void *arg)
   1392 {
   1393 	struct virtio_softc *sc = arg;
   1394 	int r = 0;
   1395 
   1396 	if (sc->sc_config_change != NULL)
   1397 		r = (sc->sc_config_change)(sc);
   1398 	return r;
   1399 }
   1400 
   1401 MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");
   1402 
   1403 #ifdef _MODULE
   1404 #include "ioconf.c"
   1405 #endif
   1406 
   1407 static int
   1408 virtio_pci_modcmd(modcmd_t cmd, void *opaque)
   1409 {
   1410 	int error = 0;
   1411 
   1412 #ifdef _MODULE
   1413 	switch (cmd) {
   1414 	case MODULE_CMD_INIT:
   1415 		error = config_init_component(cfdriver_ioconf_virtio_pci,
   1416 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
   1417 		break;
   1418 	case MODULE_CMD_FINI:
   1419 		error = config_fini_component(cfdriver_ioconf_virtio_pci,
   1420 		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
   1421 		break;
   1422 	default:
   1423 		error = ENOTTY;
   1424 		break;
   1425 	}
   1426 #endif
   1427 
   1428 	return error;
   1429 }
   1430