/*	$NetBSD: virtio_pci.c,v 1.55 2024/09/25 17:12:47 christos Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.55 2024/09/25 17:12:47 christos Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/device.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#if defined(__alpha__) || defined(__sparc64__)
/*
 * XXX VIRTIO_F_ACCESS_PLATFORM is required for standard PCI DMA
 * XXX to work on these platforms, at least by Qemu.
 * XXX
 * XXX Generalize this later.
 */
#define	__NEED_VIRTIO_F_ACCESS_PLATFORM
#endif /* __alpha__ || __sparc64__ */

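/*
 * Error reporting helper: during a re-initialization (_use_log set by the
 * callers' "reinit" argument) report through log(9); during the initial
 * attach report through aprint_error_dev(9).
 */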
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while(0)

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);

#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *,
		    uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *,
		    uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *, int, void *,
		    int);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *);
static int	virtio_pci_intr(void *);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    const struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    const struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

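/*
 * MSI-X vector layout: vector 0 carries config change interrupts; queue
 * interrupts start at vector 1, either shared by all virtqueues or, when
 * per-VQ interrupts are in use (sc_intr_pervq), one vector per virtqueue.
 */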
#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

/*
 * For big-endian aarch64/armv7 on QEMU (and most real HW), only CPU cores
 * are running in big-endian mode, with all peripherals being configured to
 * little-endian mode. Their default bus_space(9) functions forcibly swap
 * byte-order. This guarantees that PIO'ed data from pci(4), e.g., are
 * correctly handled by bus_space(9), while DMA'ed ones should be swapped
 * by hand, in violation of virtio(4) specifications.
 */

#if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif

CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, 0);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};

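/*
 * Match both transitional (device IDs 0x1000-0x103f, PCI revision 0) and
 * non-transitional (device IDs 0x1040-0x107f, PCI revision 1) virtio
 * devices from QEMU/KVM (Qumranet vendor ID).
 */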
static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	const struct pci_attach_args * const pa = aux;

	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	const struct pci_attach_args * const pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/*
		 * revision 0 means 0.9 only or both 0.9 and 1.0.  The
		 * latter are so-called "Transitional Devices".  For
		 * those devices, we want to use the 1.0 interface if
		 * possible.
		 *
		 * XXX Currently only on platforms that require 1.0
		 * XXX features, such as VIRTIO_F_ACCESS_PLATFORM.
		 */
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
		/* First, try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
		if (ret != 0) {
			aprint_error_dev(self,
			    "VirtIO 1.0 error = %d, falling back to 0.9\n",
			    ret);
			/* Fall back to 0.9. */
			ret = virtio_pci_attach_09(self, aux);
		}
#else
		ret = virtio_pci_attach_09(self, aux);
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}

static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	unsigned i;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (sc->sc_version_1) {
		for (i = 0; i < __arraycount(psc->sc_bars_iot); i++) {
			if (psc->sc_bars_iosize[i] == 0)
				continue;
			bus_space_unmap(psc->sc_bars_iot[i],
			    psc->sc_bars_ioh[i], psc->sc_bars_iosize[i]);
			psc->sc_bars_iosize[i] = 0;
		}
	} else {
		if (psc->sc_iosize) {
			bus_space_unmap(psc->sc_iot, psc->sc_ioh,
			    psc->sc_iosize);
			psc->sc_iosize = 0;
		}
	}

	return 0;
}

static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian    = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}

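/*
 * Attach using the virtio 1.0 ("modern") interface: locate the common,
 * notify, ISR and (optional) device config structures via the PCI vendor
 * capability list and map the BARs they live in.
 */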
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	const pci_chipset_tag_t pc = pa->pa_pc;
	const pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap * const caps[] =
	    { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;

		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;

		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u \n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
		    psc->sc_bars_ioh[i], device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian    = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
		psc->sc_bars_iosize[i] = 0;
	}
	return ret;
}

/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf,
    int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset,
	    &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

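	/*
	 * Read the remainder of a capability that is larger than the
	 * generic header, one dword at a time.
	 */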
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}

/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* only applicable for v 0.9 but also called for 1.0 */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

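/*
 * Status writes are cumulative: unless resetting (status == 0), the new
 * bits are OR'ed into the current device status.
 */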
static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}

static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
}

static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
	    status | old);
}

void
virtio_pci_negotiate_features_10(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self          =  sc->sc_dev;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

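	/*
	 * Read the device's 64-bit feature set as two 32-bit words via
	 * the feature select registers, intersect it with what the guest
	 * wants, write the result back the same way, then set FEATURES_OK
	 * and re-read the status to check that the device accepted the
	 * negotiated set.
	 */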
	guest_features |= VIRTIO_F_VERSION_1;
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
	/* XXX This could use some work. */
	guest_features |= VIRTIO_F_ACCESS_PLATFORM;
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)bus_space_read_4(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}

/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit, "can't set config msix vector\n");
		return -1;
	}

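	/*
	 * Route each virtqueue to its MSI-X vector: write the vector and
	 * read it back, failing if the device did not accept it.
	 */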
	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    const struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

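	/*
	 * Establish the config-change handler on its own vector, then
	 * either one handler per virtqueue (sc_intr_pervq) or a single
	 * shared handler for all queues.
	 */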
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self,
		    "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc,
			    psc->sc_ihp[n], sc->sc_ipl,
			    vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self,
				    "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
			pci_intr_setattr(pc, &psc->sc_ihp[idx],
			    PCI_INTR_MPSAFE, true);
		}

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc,
		    psc->sc_ihp[idx], sc->sc_ipl,
		    virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self,
			    "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity,
			    NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s"
				    " affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
		    sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n",
		    intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL) {
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[idx]);
		}
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    const struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

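/*
 * Allocate and establish interrupts: prefer MSI-X with either one vector
 * per virtqueue plus one for config changes (when VIRTIO_F_INTR_PERVQ is
 * requested and enough vectors are available) or just two vectors, and
 * fall back to INTx if MSI-X cannot be established.
 */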
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0) {
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp,
		    psc->sc_ihs_num);
	}

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}

/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}