/* $NetBSD: virtio_pci.c,v 1.38.4.5 2024/11/28 16:33:25 martin Exp $ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.38.4.5 2024/11/28 16:33:25 martin Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/device.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */


#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while(0)
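
/*
 * Usage sketch: callers pass their "reinit" flag as _use_log, so a
 * failure during a run-time re-initialization goes through log(9)
 * while the same failure during autoconfiguration goes through
 * aprint(9), e.g.:
 *
 *	VIRTIO_PCI_LOG(sc, reinit, "can't set config msix vector\n");
 */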

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);

#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *,
		    uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *,
		    uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *, int, void *,
		    int);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *);
static int	virtio_pci_intr(void *);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    const struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    const struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
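
/*
 * Vector layout used below: vector 0 (the config vector index) carries
 * config-change interrupts and vector 1 carries queue interrupts.
 * When VIRTIO_F_INTR_PERVQ is in effect, queue N uses vector 1 + N
 * instead; e.g. a device with three virtqueues then uses vector 0 for
 * config and vectors 1..3, one per queue.
 */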

/*
 * For big-endian aarch64/armv7 on QEMU (and most real HW), only the CPU
 * cores run in big-endian mode; the peripherals are all configured for
 * little-endian mode, and the default bus_space(9) functions forcibly
 * swap byte-order.  This guarantees that PIO'ed data from pci(4), for
 * example, is handled correctly by bus_space(9), while DMA'ed data must
 * be swapped by hand, in violation of the virtio specification.
 */

#if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif
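
/*
 * A hedged sketch of how sc_struct_endian is consumed: virtio(4)'s
 * accessors are assumed to swap DMA'ed ring fields by hand roughly
 * like this (names per virtiovar.h; illustrative, not verified here):
 *
 *	uint16_t
 *	virtio_rw16(struct virtio_softc *sc, uint16_t val)
 *	{
 *		return (sc->sc_struct_endian == BIG_ENDIAN) ?
 *		    be16toh(val) : le16toh(val);
 *	}
 *
 * sc_bus_endian, by contrast, only describes what bus_space(9) reads
 * return on this platform.
 */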

CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, 0);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick		= virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue	= virtio_pci_setup_queue_09,
	.set_status	= virtio_pci_set_status_09,
	.neg_features	= virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick		= virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue	= virtio_pci_setup_queue_10,
	.set_status	= virtio_pci_set_status_10,
	.neg_features	= virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};

static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	const struct pci_attach_args * const pa = aux;

	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	const struct pci_attach_args * const pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* the subsystem ID identifies the device type */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* the PCI product number identifies the device type */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}
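
	/*
	 * Worked example (illustrative IDs): a non-transitional
	 * virtio-net function appears as PCI product 0x1041 with
	 * revision 1, so id = 0x1041 - 0x1040 = 1, the virtio device
	 * ID of a network device.  A transitional virtio-net device
	 * is instead product 0x1000, revision 0, with subsystem ID 1.
	 */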

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}

static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	unsigned i;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (sc->sc_version_1) {
		for (i = 0; i < __arraycount(psc->sc_bars_iot); i++) {
			if (psc->sc_bars_iosize[i] == 0)
				continue;
			bus_space_unmap(psc->sc_bars_iot[i],
			    psc->sc_bars_ioh[i], psc->sc_bars_iosize[i]);
			psc->sc_bars_iosize[i] = 0;
		}
	} else {
		if (psc->sc_iosize) {
			bus_space_unmap(psc->sc_iot, psc->sc_ioh,
			    psc->sc_iosize);
			psc->sc_iosize = 0;
		}
	}

	return 0;
}

static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}

static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	const struct pci_attach_args * const pa = aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	const pci_chipset_tag_t pc = pa->pa_pc;
	const pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap * const caps[] =
	    { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;

		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;

		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u \n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
		    psc->sc_bars_ioh[i], device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
		psc->sc_bars_iosize[i] = 0;
	}
	return ret;
}

/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf,
    int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset,
	    &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}
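
/*
 * Illustrative walk (hypothetical offsets): a device might chain its
 * vendor-specific capabilities at config space offsets 0x40, 0x50,
 * 0x60, ... with cfg_type COMMON_CFG, NOTIFY_CFG, ISR_CFG and
 * DEVICE_CFG, each naming a BAR plus an offset/length window inside
 * it; the loop above follows cap_next until the requested cfg_type
 * is found.
 */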

/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* only meaningful for v0.9; returns immediately when called for v1.0 */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}
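
/*
 * Worked example for the legacy queue address register above: the
 * device expects a page frame number, so a ring at, say, physical
 * address 0x123000 with VIRTIO_PAGE_SIZE 4096 is programmed as
 * 0x123000 / 4096 = 0x123.
 */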

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}
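
/*
 * Worked example (hypothetical values): if queue setup read back a
 * queue_notify_off of 3 and the device advertises a
 * notify_off_multiplier of 4, the kick above writes the queue index
 * at byte offset 3 * 4 = 12 of the notify window.
 */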

static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
}

static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
	    status | old);
}

static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)bus_space_read_4(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}

/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit, "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    const struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self,
		    "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc,
			    psc->sc_ihp[n], sc->sc_ipl,
			    vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self,
				    "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
			pci_intr_setattr(pc, &psc->sc_ihp[idx],
			    PCI_INTR_MPSAFE, true);
		}

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc,
		    psc->sc_ihp[idx], sc->sc_ipl,
		    virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self,
			    "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity,
			    NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s"
				    " affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
		    sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n",
		    intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL) {
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[idx]);
		}
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    const struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_intr_pervq = nmsix > 2;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability() returns non-zero when the
		 * capability exists; if the device also has an MSI-X
		 * capability, make sure its enable bit is cleared
		 * while we run on INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0) {
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp,
		    psc->sc_ihs_num);
	}

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}

/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = container_of(sc,
	    struct virtio_pci_softc, sc_sc);
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}