/* $NetBSD: virtio_pci.c,v 1.49 2024/06/25 14:22:48 riastradh Exp $ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.49 2024/06/25 14:22:48 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/syslog.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

#if defined(__alpha__) || defined(__sparc64__)
/*
 * XXX VIRTIO_F_ACCESS_PLATFORM is required for standard PCI DMA
 * XXX to work on these platforms, at least under QEMU.
 * XXX
 * XXX Generalize this later.
 */
#define __NEED_VIRTIO_F_ACCESS_PLATFORM
#endif /* __alpha__ || __sparc64__ */

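/*
 * Logging helper: during re-initialization (_use_log set) messages go
 * to the system log, since autoconfiguration output is no longer
 * appropriate; during the initial attach they go through
 * aprint_error_dev().
 */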
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)           \
do {                                                            \
        if ((_use_log)) {                                       \
                log(LOG_DEBUG, "%s: " _fmt,                     \
                    device_xname((_sc)->sc_dev),                \
                    ##_args);                                   \
        } else {                                                \
                aprint_error_dev((_sc)->sc_dev,                 \
                    _fmt, ##_args);                             \
        }                                                       \
} while(0)

static int virtio_pci_match(device_t, cfdata_t, void *);
static void virtio_pci_attach(device_t, device_t, void *);
static int virtio_pci_rescan(device_t, const char *, const int *);
static int virtio_pci_detach(device_t, int);


#define NMAPREG ((PCI_MAPREG_END - PCI_MAPREG_START) / \
                sizeof(pcireg_t))
struct virtio_pci_softc {
        struct virtio_softc sc_sc;
        bool sc_intr_pervq;

        /* IO space */
        bus_space_tag_t sc_iot;
        bus_space_handle_t sc_ioh;
        bus_size_t sc_iosize;

        /* BARs */
        bus_space_tag_t sc_bars_iot[NMAPREG];
        bus_space_handle_t sc_bars_ioh[NMAPREG];
        bus_size_t sc_bars_iosize[NMAPREG];

        /* notify space */
        bus_space_tag_t sc_notify_iot;
        bus_space_handle_t sc_notify_ioh;
        bus_size_t sc_notify_iosize;
        uint32_t sc_notify_off_multiplier;

        /* isr space */
        bus_space_tag_t sc_isr_iot;
        bus_space_handle_t sc_isr_ioh;
        bus_size_t sc_isr_iosize;

        /* generic */
        struct pci_attach_args sc_pa;
        pci_intr_handle_t *sc_ihp;
        void **sc_ihs;
        int sc_ihs_num;
        int sc_devcfg_offset;   /* for 0.9 */
};

static int virtio_pci_attach_09(device_t, void *);
static void virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
static void virtio_pci_set_status_09(struct virtio_softc *, int);
static void virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);

static int virtio_pci_attach_10(device_t, void *);
static void virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
static void virtio_pci_set_status_10(struct virtio_softc *, int);
static void virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
static int virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);

static int virtio_pci_alloc_interrupts(struct virtio_softc *);
static void virtio_pci_free_interrupts(struct virtio_softc *);
static int virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int virtio_pci_intr(void *arg);
static int virtio_pci_msix_queue_intr(void *);
static int virtio_pci_msix_config_intr(void *);
static int virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int virtio_pci_establish_msix_interrupts(struct virtio_softc *,
    struct pci_attach_args *);
static int virtio_pci_establish_intx_interrupt(struct virtio_softc *,
    struct pci_attach_args *);
static bool virtio_pci_msix_enabled(struct virtio_pci_softc *);

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX 0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX  1
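/*
 * MSI-X vector layout: vector 0 is used for config changes and vector
 * 1 for all virtqueues, except with per-virtqueue vectors, where
 * queue N uses vector 1 + N.
 */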

/*
 * On big-endian aarch64/armv7 under QEMU (and most real hardware),
 * only the CPU cores run in big-endian mode; all peripherals are
 * configured little-endian.  The default bus_space(9) functions for
 * these platforms therefore forcibly swap byte order.  This
 * guarantees that PIO'ed data from pci(4), for example, is handled
 * correctly by bus_space(9), while DMA'ed data must still be swapped
 * by hand, in violation of the virtio(4) specifications.
 */

#if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif
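/*
 * READ_ENDIAN_xx is the byte order in which bus_space(9) reads of the
 * virtio registers arrive (sc_bus_endian below); STRUCT_ENDIAN_xx is
 * the byte order of multi-byte fields in the virtio structures
 * (sc_struct_endian): guest-native for 0.9, always little-endian for
 * 1.0.
 */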


CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, 0);

static const struct virtio_ops virtio_pci_ops_09 = {
        .kick = virtio_pci_kick_09,
        .read_queue_size = virtio_pci_read_queue_size_09,
        .setup_queue = virtio_pci_setup_queue_09,
        .set_status = virtio_pci_set_status_09,
        .neg_features = virtio_pci_negotiate_features_09,
        .alloc_interrupts = virtio_pci_alloc_interrupts,
        .free_interrupts = virtio_pci_free_interrupts,
        .setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
        .kick = virtio_pci_kick_10,
        .read_queue_size = virtio_pci_read_queue_size_10,
        .setup_queue = virtio_pci_setup_queue_10,
        .set_status = virtio_pci_set_status_10,
        .neg_features = virtio_pci_negotiate_features_10,
        .alloc_interrupts = virtio_pci_alloc_interrupts,
        .free_interrupts = virtio_pci_free_interrupts,
        .setup_interrupts = virtio_pci_setup_interrupts_10,
};

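/*
 * Match both transitional virtio devices (product IDs 0x1000-0x103f,
 * PCI revision 0) and non-transitional virtio 1.0 devices (product
 * IDs 0x1040-0x107f).
 */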
static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
        struct pci_attach_args *pa;

        pa = (struct pci_attach_args *)aux;
        switch (PCI_VENDOR(pa->pa_id)) {
        case PCI_VENDOR_QUMRANET:
                /* Transitional devices MUST have a PCI Revision ID of 0. */
                if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
                    PCI_PRODUCT(pa->pa_id)) &&
                    (PCI_PRODUCT(pa->pa_id) <=
                    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
                    PCI_REVISION(pa->pa_class) == 0)
                        return 1;
                /*
                 * Non-transitional devices SHOULD have a PCI Revision
                 * ID of 1 or higher.  Drivers MUST match any PCI
                 * Revision ID value.
                 */
                if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
                    PCI_PRODUCT(pa->pa_id)) &&
                    (PCI_PRODUCT(pa->pa_id) <=
                    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
                    /* XXX: TODO */
                    PCI_REVISION(pa->pa_class) == 1)
                        return 1;
                break;
        }

        return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
        struct virtio_pci_softc * const psc = device_private(self);
        struct virtio_softc * const sc = &psc->sc_sc;
        struct pci_attach_args *pa = (struct pci_attach_args *)aux;
        pci_chipset_tag_t pc = pa->pa_pc;
        pcitag_t tag = pa->pa_tag;
        int revision;
        int ret;
        pcireg_t id;
        pcireg_t csr;

        revision = PCI_REVISION(pa->pa_class);
        switch (revision) {
        case 0:
                /* The subsystem ID identifies the device type. */
                id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
                break;
        case 1:
                /* The PCI product number identifies the device type. */
                id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
                break;
        default:
                aprint_normal(": unknown revision 0x%02x; giving up\n",
                    revision);
                return;
        }

        aprint_normal("\n");
        aprint_naive("\n");
        virtio_print_device_type(self, id, revision);

        csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
        csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
        pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

        sc->sc_dev = self;
        psc->sc_pa = *pa;
        psc->sc_iot = pa->pa_iot;

        sc->sc_dmat = pa->pa_dmat;
        if (pci_dma64_available(pa))
                sc->sc_dmat = pa->pa_dmat64;

        /* The attach method depends on the revision. */
        ret = 0;
        if (revision == 1) {
                /* try to attach 1.0 */
                ret = virtio_pci_attach_10(self, aux);
        }
        if (ret == 0 && revision == 0) {
                /*
                 * Revision 0 means 0.9 only, or both 0.9 and 1.0.  The
                 * latter are so-called "Transitional Devices".  For
                 * those devices, we want to use the 1.0 interface if
                 * possible.
                 *
                 * XXX Currently only on platforms that require 1.0
                 * XXX features, such as VIRTIO_F_ACCESS_PLATFORM.
                 */
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
                /* First, try to attach 1.0 */
                ret = virtio_pci_attach_10(self, aux);
                if (ret != 0) {
                        aprint_error_dev(self,
                            "VirtIO 1.0 error = %d, falling back to 0.9\n",
                            ret);
                        /* Fall back to 0.9. */
                        ret = virtio_pci_attach_09(self, aux);
                }
#else
                ret = virtio_pci_attach_09(self, aux);
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
        }
        if (ret) {
                aprint_error_dev(self, "cannot attach (%d)\n", ret);
                return;
        }
        KASSERT(sc->sc_ops);

        /* preset config region */
        psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
        if (virtio_pci_adjust_config_region(psc))
                return;

        /* generic */
        virtio_device_reset(sc);
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

        sc->sc_childdevid = id;
        sc->sc_child = NULL;
        virtio_pci_rescan(self, NULL, NULL);
        return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
        struct virtio_pci_softc * const psc = device_private(self);
        struct virtio_softc * const sc = &psc->sc_sc;
        struct virtio_attach_args va;

        if (sc->sc_child)       /* Child already attached? */
                return 0;

        memset(&va, 0, sizeof(va));
        va.sc_childdevid = sc->sc_childdevid;

        config_found(self, &va, NULL, CFARGS_NONE);

        if (virtio_attach_failed(sc))
                return 0;

        return 0;
}


static int
virtio_pci_detach(device_t self, int flags)
{
        struct virtio_pci_softc * const psc = device_private(self);
        struct virtio_softc * const sc = &psc->sc_sc;
        unsigned i;
        int r;

        r = config_detach_children(self, flags);
        if (r != 0)
                return r;

        /* Check that the child either never attached or detached properly. */
        KASSERT(sc->sc_child == NULL);
        KASSERT(sc->sc_vqs == NULL);
        KASSERT(psc->sc_ihs_num == 0);

        if (sc->sc_version_1) {
                for (i = 0; i < __arraycount(psc->sc_bars_iot); i++) {
                        if (psc->sc_bars_iosize[i] == 0)
                                continue;
                        bus_space_unmap(psc->sc_bars_iot[i],
                            psc->sc_bars_ioh[i], psc->sc_bars_iosize[i]);
                        psc->sc_bars_iosize[i] = 0;
                }
        } else {
                if (psc->sc_iosize) {
                        bus_space_unmap(psc->sc_iot, psc->sc_ioh,
                            psc->sc_iosize);
                        psc->sc_iosize = 0;
                }
        }

        return 0;
}


static int
virtio_pci_attach_09(device_t self, void *aux)
{
        struct virtio_pci_softc * const psc = device_private(self);
        struct pci_attach_args *pa = (struct pci_attach_args *)aux;
        struct virtio_softc * const sc = &psc->sc_sc;

        /* complete IO region */
        if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
            &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
                aprint_error_dev(self, "can't map i/o space\n");
                return EIO;
        }

        /* queue space */
        if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
                aprint_error_dev(self, "can't map notify i/o space\n");
                return EIO;
        }
        psc->sc_notify_iosize = 2;
        psc->sc_notify_iot = psc->sc_iot;

        /* ISR space */
        if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
                aprint_error_dev(self, "can't map isr i/o space\n");
                return EIO;
        }
        psc->sc_isr_iosize = 1;
        psc->sc_isr_iot = psc->sc_iot;

        /* set our version 0.9 ops */
        sc->sc_ops = &virtio_pci_ops_09;
        sc->sc_bus_endian = READ_ENDIAN_09;
        sc->sc_struct_endian = STRUCT_ENDIAN_09;
        return 0;
}


static int
virtio_pci_attach_10(device_t self, void *aux)
{
        struct virtio_pci_softc * const psc = device_private(self);
        struct pci_attach_args *pa = (struct pci_attach_args *)aux;
        struct virtio_softc * const sc = &psc->sc_sc;
        pci_chipset_tag_t pc = pa->pa_pc;
        pcitag_t tag = pa->pa_tag;

        struct virtio_pci_cap common, isr, device;
        struct virtio_pci_notify_cap notify;
        int have_device_cfg = 0;
        bus_size_t bars[NMAPREG] = { 0 };
        int bars_idx[NMAPREG] = { 0 };
        struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
        int i, j, ret = 0;

        if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
            &common, sizeof(common)))
                return ENODEV;
        if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
            &notify, sizeof(notify)))
                return ENODEV;
        if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
            &isr, sizeof(isr)))
                return ENODEV;
        if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
            &device, sizeof(device)))
                memset(&device, 0, sizeof(device));
        else
                have_device_cfg = 1;

        /* Figure out which bars we need to map */
        for (i = 0; i < __arraycount(caps); i++) {
                int bar = caps[i]->bar;
                bus_size_t len = caps[i]->offset + caps[i]->length;
                if (caps[i]->length == 0)
                        continue;
                if (bars[bar] < len)
                        bars[bar] = len;
        }

        for (i = j = 0; i < __arraycount(bars); i++) {
                int reg;
                pcireg_t type;
                if (bars[i] == 0)
                        continue;
                reg = PCI_BAR(i);
                type = pci_mapreg_type(pc, tag, reg);
                if (pci_mapreg_map(pa, reg, type, 0,
                    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
                    NULL, &psc->sc_bars_iosize[j])) {
                        aprint_error_dev(self, "can't map bar %d\n", i);
                        ret = EIO;
                        goto err;
                }
                aprint_debug_dev(self,
                    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
                    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
                bars_idx[i] = j;
                j++;
        }

        i = bars_idx[notify.cap.bar];
        if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
            notify.cap.offset, notify.cap.length,
            &psc->sc_notify_ioh)) {
                aprint_error_dev(self, "can't map notify i/o space\n");
                ret = EIO;
                goto err;
        }
        psc->sc_notify_iosize = notify.cap.length;
        psc->sc_notify_iot = psc->sc_bars_iot[i];
        psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

        if (have_device_cfg) {
                i = bars_idx[device.bar];
                if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
                    device.offset, device.length,
                    &sc->sc_devcfg_ioh)) {
                        aprint_error_dev(self, "can't map devcfg i/o space\n");
                        ret = EIO;
                        goto err;
                }
                aprint_debug_dev(self,
                    "device.offset = 0x%x, device.length = 0x%x\n",
                    device.offset, device.length);
                sc->sc_devcfg_iosize = device.length;
                sc->sc_devcfg_iot = psc->sc_bars_iot[i];
        }

        i = bars_idx[isr.bar];
        if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
            isr.offset, isr.length, &psc->sc_isr_ioh)) {
                aprint_error_dev(self, "can't map isr i/o space\n");
                ret = EIO;
                goto err;
        }
        psc->sc_isr_iosize = isr.length;
        psc->sc_isr_iot = psc->sc_bars_iot[i];

        i = bars_idx[common.bar];
        if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
            common.offset, common.length, &psc->sc_ioh)) {
                aprint_error_dev(self, "can't map common i/o space\n");
                ret = EIO;
                goto err;
        }
        psc->sc_iosize = common.length;
        psc->sc_iot = psc->sc_bars_iot[i];

        psc->sc_sc.sc_version_1 = 1;

        /* set our version 1.0 ops */
        sc->sc_ops = &virtio_pci_ops_10;
        sc->sc_bus_endian = READ_ENDIAN_10;
        sc->sc_struct_endian = STRUCT_ENDIAN_10;
        return 0;

err:
        /* undo our pci_mapreg_map()s */
        for (i = 0; i < __arraycount(bars); i++) {
                if (psc->sc_bars_iosize[i] == 0)
                        continue;
                bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
                    psc->sc_bars_iosize[i]);
                psc->sc_bars_iosize[i] = 0;
        }
        return ret;
}

/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
        device_t self = psc->sc_sc.sc_dev;
        pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
        pcitag_t tag = psc->sc_pa.pa_tag;
        unsigned int offset, i, len;
        union {
                pcireg_t reg[8];
                struct virtio_pci_cap vcap;
        } *v = buf;

        if (buflen < sizeof(struct virtio_pci_cap))
                return ERANGE;

        if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
                return ENOENT;

        do {
                for (i = 0; i < 4; i++)
                        v->reg[i] =
                            le32toh(pci_conf_read(pc, tag, offset + i * 4));
                if (v->vcap.cfg_type == cfg_type)
                        break;
                offset = v->vcap.cap_next;
        } while (offset != 0);

        if (offset == 0)
                return ENOENT;

        if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
                len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
                if (len > buflen) {
                        aprint_error_dev(self, "%s cap too large\n", __func__);
                        return ERANGE;
                }
                for (i = 4; i < len / sizeof(pcireg_t); i++)
                        v->reg[i] =
                            le32toh(pci_conf_read(pc, tag, offset + i * 4));
        }

        /* endian fixup */
        v->vcap.offset = le32toh(v->vcap.offset);
        v->vcap.length = le32toh(v->vcap.length);
        return 0;
}


/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

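/*
 * Notify the device: a 0.9 kick writes the queue index to the single
 * queue notify register.
 */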
static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

        bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/*
 * Set up the device config subregion.  Only meaningful for 0.9, where
 * the region follows a header whose size depends on MSI-X; for 1.0 the
 * region is mapped from its capability at attach time, so although
 * this is called for both versions, it is a no-op there.
 */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
        struct virtio_softc * const sc = &psc->sc_sc;
        device_t self = sc->sc_dev;

        if (psc->sc_sc.sc_version_1)
                return 0;

        sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
        sc->sc_devcfg_iot = psc->sc_iot;
        if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
            psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
            &sc->sc_devcfg_ioh)) {
                aprint_error_dev(self, "can't map config i/o space\n");
                return EIO;
        }

        return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

        bus_space_write_2(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_QUEUE_SELECT, idx);
        return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_QUEUE_SIZE);
}

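/*
 * The 0.9 queue address register holds a page frame number, hence the
 * division by VIRTIO_PAGE_SIZE.
 */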
static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

        bus_space_write_2(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_QUEUE_SELECT, idx);
        bus_space_write_4(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

        if (psc->sc_ihs_num > 1) {
                int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
                if (psc->sc_intr_pervq)
                        vec += idx;
                bus_space_write_2(psc->sc_iot, psc->sc_ioh,
                    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
        }
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        int old = 0;

        if (status != 0) {
                old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
                    VIRTIO_CONFIG_DEVICE_STATUS);
        }
        bus_space_write_1(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        uint32_t r;

        r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_DEVICE_FEATURES);

        r &= guest_features;

        bus_space_write_4(psc->sc_iot, psc->sc_ioh,
            VIRTIO_CONFIG_GUEST_FEATURES, r);

        sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        unsigned offset = sc->sc_vqs[idx].vq_notify_off *
            psc->sc_notify_off_multiplier;

        bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}


static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        bus_space_tag_t iot = psc->sc_iot;
        bus_space_handle_t ioh = psc->sc_ioh;

        bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
        return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
        bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
        bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
        bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
        bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}

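/*
 * Program the queue's descriptor, available and used ring addresses
 * and enable it; an address of 0 disables the queue instead.  The
 * notify offset read back here is consumed by virtio_pci_kick_10().
 */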
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        struct virtqueue *vq = &sc->sc_vqs[idx];
        bus_space_tag_t iot = psc->sc_iot;
        bus_space_handle_t ioh = psc->sc_ioh;
        KASSERT(vq->vq_index == idx);

        bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
        if (addr == 0) {
                bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
                virtio_pci_bus_space_write_8(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_DESC, 0);
                virtio_pci_bus_space_write_8(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
                virtio_pci_bus_space_write_8(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_USED, 0);
        } else {
                virtio_pci_bus_space_write_8(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_DESC, addr);
                virtio_pci_bus_space_write_8(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
                virtio_pci_bus_space_write_8(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
                bus_space_write_2(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
                vq->vq_notify_off = bus_space_read_2(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
        }

        if (psc->sc_ihs_num > 1) {
                int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
                if (psc->sc_intr_pervq)
                        vec += idx;
                bus_space_write_2(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
        }
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        bus_space_tag_t iot = psc->sc_iot;
        bus_space_handle_t ioh = psc->sc_ioh;
        int old = 0;

        if (status)
                old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
        bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
}

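/*
 * Feature negotiation, 1.0 style: the 64-bit feature words are
 * exchanged 32 bits at a time through the banked FEATURE_SELECT /
 * FEATURE registers; FEATURES_OK is then set and read back to verify
 * that the device accepted the negotiated set.
 */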
static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        device_t self = sc->sc_dev;
        bus_space_tag_t iot = psc->sc_iot;
        bus_space_handle_t ioh = psc->sc_ioh;
        uint64_t host, negotiated, device_status;

        guest_features |= VIRTIO_F_VERSION_1;
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
        /* XXX This could use some work. */
        guest_features |= VIRTIO_F_ACCESS_PLATFORM;
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
        /* notify on empty is 0.9 only */
        guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
        sc->sc_active_features = 0;

        bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
        host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
        bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
        host |= (uint64_t)
            bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

        negotiated = host & guest_features;

        bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
        bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
            negotiated & 0xffffffff);
        bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
        bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
            negotiated >> 32);
        virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

        device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
        if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
                aprint_error_dev(self, "feature negotiation failed\n");
                bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
                    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
                return;
        }

        if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
                aprint_error_dev(self, "host rejected version 1\n");
                bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
                    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
                return;
        }

        sc->sc_active_features = negotiated;
        return;
}


/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        bus_space_tag_t iot = psc->sc_iot;
        bus_space_handle_t ioh = psc->sc_ioh;
        int vector, ret, qid;

        if (!virtio_pci_msix_enabled(psc))
                return 0;

        vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
        bus_space_write_2(iot, ioh,
            VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
        ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
        if (ret != vector) {
                VIRTIO_PCI_LOG(sc, reinit,
                    "can't set config msix vector\n");
                return -1;
        }

        for (qid = 0; qid < sc->sc_nvqs; qid++) {
                vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

                if (psc->sc_intr_pervq)
                        vector += qid;
                bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
                bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
                    vector);
                ret = bus_space_read_2(iot, ioh,
                    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
                if (ret != vector) {
                        VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
                            "msix vector\n", qid);
                        return -1;
                }
        }

        return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        int offset, vector, ret, qid;

        if (!virtio_pci_msix_enabled(psc))
                return 0;

        offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
        vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

        bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
        ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
        if (ret != vector) {
                aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
                    __func__, vector, ret);
                VIRTIO_PCI_LOG(sc, reinit,
                    "can't set config msix vector\n");
                return -1;
        }

        for (qid = 0; qid < sc->sc_nvqs; qid++) {
                offset = VIRTIO_CONFIG_QUEUE_SELECT;
                bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

                offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
                vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

                if (psc->sc_intr_pervq)
                        vector += qid;

                bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
                ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
                if (ret != vector) {
                        aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
                            " expected=%d, actual=%d\n",
                            __func__, qid, vector, ret);
                        VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
                            "msix vector\n", qid);
                        return -1;
                }
        }

        return 0;
}

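/*
 * Establish one MSI-X handler for config changes, plus either a single
 * shared handler for all virtqueues or, with per-virtqueue vectors,
 * one handler per queue, in which case the queue interrupts are also
 * distributed across CPUs.
 */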
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        device_t self = sc->sc_dev;
        pci_chipset_tag_t pc = pa->pa_pc;
        struct virtqueue *vq;
        char intrbuf[PCI_INTRSTR_LEN];
        char intr_xname[INTRDEVNAMEBUF];
        char const *intrstr;
        int idx, qid, n;

        idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
        if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
                pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

        snprintf(intr_xname, sizeof(intr_xname), "%s config",
            device_xname(sc->sc_dev));

        psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
            sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
        if (psc->sc_ihs[idx] == NULL) {
                aprint_error_dev(self, "couldn't establish MSI-X for config\n");
                goto error;
        }

        idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
        if (psc->sc_intr_pervq) {
                for (qid = 0; qid < sc->sc_nvqs; qid++) {
                        n = idx + qid;
                        vq = &sc->sc_vqs[qid];

                        snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
                            device_xname(sc->sc_dev), qid);

                        if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
                                pci_intr_setattr(pc, &psc->sc_ihp[n],
                                    PCI_INTR_MPSAFE, true);
                        }

                        psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
                            sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
                        if (psc->sc_ihs[n] == NULL) {
                                aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
                                goto error;
                        }
                }
        } else {
                if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
                        pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

                snprintf(intr_xname, sizeof(intr_xname), "%s queues",
                    device_xname(sc->sc_dev));
                psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
                    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
                if (psc->sc_ihs[idx] == NULL) {
                        aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
                        goto error;
                }
        }

        idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
        intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
        aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
        idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
        if (psc->sc_intr_pervq) {
                kcpuset_t *affinity;
                int affinity_to, r;

                kcpuset_create(&affinity, false);

                for (qid = 0; qid < sc->sc_nvqs; qid++) {
                        n = idx + qid;
                        affinity_to = (qid / 2) % ncpu;

                        intrstr = pci_intr_string(pc, psc->sc_ihp[n],
                            intrbuf, sizeof(intrbuf));

                        kcpuset_zero(affinity);
                        kcpuset_set(affinity, affinity_to);
                        r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
                        if (r == 0) {
                                aprint_normal_dev(self,
                                    "for vq #%d interrupting at %s affinity to %u\n",
                                    qid, intrstr, affinity_to);
                        } else {
                                aprint_normal_dev(self,
                                    "for vq #%d interrupting at %s\n",
                                    qid, intrstr);
                        }
                }

                kcpuset_destroy(affinity);
        } else {
                intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
                aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
        }

        return 0;

error:
        idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
        if (psc->sc_ihs[idx] != NULL)
                pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
        idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
        if (psc->sc_intr_pervq) {
                for (qid = 0; qid < sc->sc_nvqs; qid++) {
                        n = idx + qid;
                        if (psc->sc_ihs[n] == NULL)
                                continue;
                        pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
                }

        } else {
                if (psc->sc_ihs[idx] != NULL)
                        pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
        }

        return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        device_t self = sc->sc_dev;
        pci_chipset_tag_t pc = pa->pa_pc;
        char intrbuf[PCI_INTRSTR_LEN];
        char const *intrstr;

        if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
                pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

        psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
            sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
        if (psc->sc_ihs[0] == NULL) {
                aprint_error_dev(self, "couldn't establish INTx\n");
                return -1;
        }

        intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
        aprint_normal_dev(self, "interrupting at %s\n", intrstr);

        return 0;
}

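/*
 * Interrupt allocation strategy: try MSI-X first, with per-virtqueue
 * vectors if VIRTIO_F_INTR_PERVQ is requested and enough vectors are
 * available (otherwise two vectors: config + queues), and fall back
 * to INTx if MSI-X cannot be established.
 */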
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        device_t self = sc->sc_dev;
        pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
        pcitag_t tag = psc->sc_pa.pa_tag;
        int error;
        int nmsix;
        int off;
        int counts[PCI_INTR_TYPE_SIZE];
        pci_intr_type_t max_type;
        pcireg_t ctl;

        nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
        aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

        /* We need at least two: one for config and the other for queues */
        if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
                /* Try INTx only */
                max_type = PCI_INTR_TYPE_INTX;
                counts[PCI_INTR_TYPE_INTX] = 1;
        } else {
                /* Try MSI-X first and INTx second */
                if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
                    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
                        nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
                } else {
                        nmsix = 2;
                }

                max_type = PCI_INTR_TYPE_MSIX;
                counts[PCI_INTR_TYPE_MSIX] = nmsix;
                counts[PCI_INTR_TYPE_MSI] = 0;
                counts[PCI_INTR_TYPE_INTX] = 1;
        }

retry:
        error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
        if (error != 0) {
                aprint_error_dev(self, "couldn't map interrupt\n");
                return -1;
        }

        if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
                psc->sc_intr_pervq = nmsix > 2 ? true : false;
                psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
                    KM_SLEEP);

                error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
                if (error != 0) {
                        kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
                        pci_intr_release(pc, psc->sc_ihp, nmsix);

                        /* Retry INTx */
                        max_type = PCI_INTR_TYPE_INTX;
                        counts[PCI_INTR_TYPE_INTX] = 1;
                        goto retry;
                }

                psc->sc_ihs_num = nmsix;
                psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
                virtio_pci_adjust_config_region(psc);
        } else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
                psc->sc_intr_pervq = false;
                psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
                    KM_SLEEP);

                error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
                if (error != 0) {
                        kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
                        pci_intr_release(pc, psc->sc_ihp, 1);
                        return -1;
                }

                psc->sc_ihs_num = 1;
                psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
                virtio_pci_adjust_config_region(psc);

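                /*
                 * pci_get_capability() returns nonzero iff the MSI-X
                 * capability is present; make sure MSI-X stays
                 * disabled while we are using INTx.
                 */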
                error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
                if (error != 0) {
                        ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
                        ctl &= ~PCI_MSIX_CTL_ENABLE;
                        pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
                }
        }

        if (!psc->sc_intr_pervq)
                CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
        return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

        for (int i = 0; i < psc->sc_ihs_num; i++) {
                if (psc->sc_ihs[i] == NULL)
                        continue;
                pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
                psc->sc_ihs[i] = NULL;
        }

        if (psc->sc_ihs_num > 0)
                pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);

        if (psc->sc_ihs != NULL) {
                kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
                psc->sc_ihs = NULL;
        }
        psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
        pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

        if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
                return true;

        return false;
}

/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
        struct virtio_softc *sc = arg;
        struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
        int isr, r = 0;

        /* check and ack the interrupt */
        isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
        if (isr == 0)
                return 0;
        if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
            (sc->sc_config_change != NULL))
                r = (sc->sc_config_change)(sc);
        if (sc->sc_intrhand != NULL) {
                if (sc->sc_soft_ih != NULL)
                        softint_schedule(sc->sc_soft_ih);
                else
                        r |= (sc->sc_intrhand)(sc);
        }

        return r;
}

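/*
 * MSI-X: shared queue vector.  With per-virtqueue vectors each queue's
 * own handler is established instead, so this is not used there.
 */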
static int
virtio_pci_msix_queue_intr(void *arg)
{
        struct virtio_softc *sc = arg;
        int r = 0;

        if (sc->sc_intrhand != NULL) {
                if (sc->sc_soft_ih != NULL)
                        softint_schedule(sc->sc_soft_ih);
                else
                        r |= (sc->sc_intrhand)(sc);
        }

        return r;
}

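/*
 * MSI-X: config change vector.
 */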
static int
virtio_pci_msix_config_intr(void *arg)
{
        struct virtio_softc *sc = arg;
        int r = 0;

        if (sc->sc_config_change != NULL)
                r = (sc->sc_config_change)(sc);
        return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
        int error = 0;

#ifdef _MODULE
        switch (cmd) {
        case MODULE_CMD_INIT:
                error = config_init_component(cfdriver_ioconf_virtio_pci,
                    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
                break;
        case MODULE_CMD_FINI:
                error = config_fini_component(cfdriver_ioconf_virtio_pci,
                    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
                break;
        default:
                error = ENOTTY;
                break;
        }
#endif

        return error;
}