/* $NetBSD: virtio_pci.c,v 1.52 2024/06/25 14:54:55 riastradh Exp $ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.52 2024/06/25 14:54:55 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/device.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

#if defined(__alpha__) || defined(__sparc64__)
/*
 * XXX VIRTIO_F_ACCESS_PLATFORM is required for standard PCI DMA
 * XXX to work on these platforms, at least under QEMU.
 * XXX
 * XXX Generalize this later.
 */
#define __NEED_VIRTIO_F_ACCESS_PLATFORM
#endif /* __alpha__ || __sparc64__ */

#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while(0)

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);

#define NMAPREG	((PCI_MAPREG_END - PCI_MAPREG_START) / \
		    sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *,
		    uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t,
		    uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *,
		    uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *, int, void *,
		    int);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *);
static int	virtio_pci_intr(void *);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

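/*
 * MSI-X vector layout: vector 0 carries config-change interrupts.  With
 * per-virtqueue interrupts enabled, queue N uses vector 1 + N; otherwise
 * all queues share vector 1.
 */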
#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

/*
 * On big-endian aarch64/armv7 under QEMU (and most real hardware), only
 * the CPU cores run in big-endian mode; all peripherals are configured
 * for little-endian mode.  Their default bus_space(9) functions therefore
 * forcibly swap byte order.  This guarantees that PIO'ed data from
 * pci(4), for example, are handled correctly by bus_space(9), while
 * DMA'ed data must be swapped by hand, in violation of the virtio(4)
 * specification.
 */
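/*
 * READ_ENDIAN_* below becomes sc_bus_endian, the apparent byte order of
 * device registers as seen through bus_space(9); STRUCT_ENDIAN_* becomes
 * sc_struct_endian, the byte order of the in-memory virtqueue structures.
 */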

#if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif

CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, 0);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};

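/*
 * Match transitional virtio devices (product IDs 0x1000-0x103f, PCI
 * revision 0) and non-transitional ones (0x1040-0x107f, revision 1).
 */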
static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher. Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/*
		 * revision 0 means 0.9 only or both 0.9 and 1.0. The
		 * latter are so-called "Transitional Devices". For
		 * those devices, we want to use the 1.0 interface if
		 * possible.
		 *
		 * XXX Currently only on platforms that require 1.0
		 * XXX features, such as VIRTIO_F_ACCESS_PLATFORM.
		 */
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
		/* First, try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
		if (ret != 0) {
			aprint_error_dev(self,
			    "VirtIO 1.0 error = %d, falling back to 0.9\n",
			    ret);
			/* Fall back to 0.9. */
			ret = virtio_pci_attach_09(self, aux);
		}
#else
		ret = virtio_pci_attach_09(self, aux);
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}

static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	unsigned i;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (sc->sc_version_1) {
		for (i = 0; i < __arraycount(psc->sc_bars_iot); i++) {
			if (psc->sc_bars_iosize[i] == 0)
				continue;
			bus_space_unmap(psc->sc_bars_iot[i],
			    psc->sc_bars_ioh[i], psc->sc_bars_iosize[i]);
			psc->sc_bars_iosize[i] = 0;
		}
	} else {
		if (psc->sc_iosize) {
			bus_space_unmap(psc->sc_iot, psc->sc_ioh,
			    psc->sc_iosize);
			psc->sc_iosize = 0;
		}
	}

	return 0;
}

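/*
 * Legacy (v0.9) layout: one I/O BAR holds every register at a fixed
 * offset.  The notify and ISR registers are carved out as subregions;
 * the device config space follows at an offset that depends on whether
 * MSI-X is enabled (see virtio_pci_adjust_config_region()).
 */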
static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}

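/*
 * Modern (v1.0) layout: the common, notify, ISR and (optional) device
 * config structures are located through vendor-specific PCI capabilities,
 * each naming a BAR and an offset within it.  Map each referenced BAR
 * once, then carve the individual structures out as subregions.
 */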
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap * const caps[] =
	    { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;

		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;

		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i],
		    psc->sc_bars_ioh[i], device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
		psc->sc_bars_iosize[i] = 0;
	}
	return ret;
}

/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf,
    int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset,
	    &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}

/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* Only applicable to v0.9; also called for v1.0, where it is a no-op. */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

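/*
 * The legacy queue address register takes a page frame number (hence
 * addr / VIRTIO_PAGE_SIZE); writing 0 releases the queue.
 */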
static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

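/* Legacy feature negotiation covers only the low 32 feature bits. */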
static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}

static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0. NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields. For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}

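/*
 * Program the queue's descriptor, available and used ring addresses and
 * enable it; addr == 0 instead disables the queue.  The notify offset is
 * read back here for later use by virtio_pci_kick_10().
 */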
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
	    status | old);
}

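/*
 * The 64-bit feature words are accessed 32 bits at a time through the
 * select/value register pairs.  After the driver features are written,
 * FEATURES_OK is set and read back to verify that the device accepted
 * the negotiated set, as the spec requires.
 */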
static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc,
    uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
	/* XXX This could use some work. */
	guest_features |= VIRTIO_F_ACCESS_PLATFORM;
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)bus_space_read_4(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh,
	    VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}

/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

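/*
 * Each MSI-X vector register is read back after writing: per the virtio
 * spec, a device that cannot allocate the vector reports NO_VECTOR
 * (0xffff) instead, so any mismatch indicates failure.
 */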
static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit, "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self,
		    "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc,
			    psc->sc_ihp[n], sc->sc_ipl,
			    vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self,
				    "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
			pci_intr_setattr(pc, &psc->sc_ihp[idx],
			    PCI_INTR_MPSAFE, true);
		}

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc,
		    psc->sc_ihp[idx], sc->sc_ipl,
		    virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self,
			    "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity,
			    NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s"
				    " affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf,
		    sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n",
		    intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL) {
			pci_intr_disestablish(psc->sc_pa.pa_pc,
			    psc->sc_ihs[idx]);
		}
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf,
	    sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

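/*
 * Interrupt allocation strategy: prefer MSI-X with one vector per
 * virtqueue plus one for config changes when VIRTIO_F_INTR_PERVQ is
 * requested and enough vectors are available, else two MSI-X vectors
 * (config + all queues), falling back to a single INTx line.
 */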
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

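		/*
		 * pci_get_capability() returns non-zero when the capability
		 * is present: having fallen back to INTx, make sure MSI-X
		 * is disabled in its control register.
		 */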
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0) {
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp,
		    psc->sc_ihs_num);
	}

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}

/*
 * INTx interrupt handler.  Reading the ISR status register returns the
 * pending interrupt bits and acknowledges the interrupt in one go.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}