/*	$NetBSD: virtio_pci.c,v 1.46 2024/06/25 14:22:03 riastradh Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.46 2024/06/25 14:22:03 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/syslog.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h>	/* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h>	/* XXX: move to non-pci */

#if defined(__alpha__) || defined(__sparc64__)
/*
 * XXX VIRTIO_F_ACCESS_PLATFORM is required for standard PCI DMA
 * XXX to work on these platforms, at least under qemu.
 * XXX
 * XXX Generalize this later.
 */
#define	__NEED_VIRTIO_F_ACCESS_PLATFORM
#endif /* __alpha__ || __sparc64__ */

#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while (0)
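
/*
 * VIRTIO_PCI_LOG() is used by the interrupt-setup paths, which run both
 * at autoconfiguration time and again when a child re-initializes the
 * device.  A typical call looks like (taken from the setup code below):
 *
 *	VIRTIO_PCI_LOG(sc, reinit, "can't set config msix vector\n");
 *
 * With _use_log == 0 the message goes to autoconf output via
 * aprint_error_dev(9); otherwise it goes to the system log via log(9),
 * since aprint output is only meaningful during attach.
 */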

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);


#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;
	bool			sc_intr_pervq;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;
	bus_size_t		sc_mapped_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int	virtio_pci_intr(void *arg);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
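
/*
 * Example vector layout: with four virtqueues and per-VQ interrupts
 * (VIRTIO_F_INTR_PERVQ) granted, vector 0 carries config changes and
 * vectors 1..4 carry vq#0..vq#3; without per-VQ interrupts, all queues
 * share vector 1 (see virtio_pci_setup_queue_*() below).
 */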

/*
 * On big-endian aarch64/armv7 under QEMU (and most real hardware), only
 * the CPU cores run in big-endian mode; the peripherals are all
 * configured little-endian.  The default bus_space(9) functions for
 * these platforms therefore swap byte order, which guarantees that data
 * PIO'ed through pci(4), for example, is handled correctly, while
 * DMA'ed data must be swapped by hand, in violation of the virtio(4)
 * specifications.
 */

#if (defined(__aarch64__) || defined(__arm__)) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif
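
/*
 * Illustration: on a big-endian arm host, a 16-bit register read done
 * through bus_space(9) is byte-swapped by the bus_space backend, so the
 * driver observes host (big) endian values (hence READ_ENDIAN_10 is
 * BIG_ENDIAN above), while the virtqueue structures in DMA memory must
 * still use the little-endian layout the 1.0 spec mandates, so
 * STRUCT_ENDIAN_10 stays LITTLE_ENDIAN.
 */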


CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};

static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		    PCI_PRODUCT(pa->pa_id)) &&
		    (PCI_PRODUCT(pa->pa_id) <=
		    PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/*
		 * revision 0 means 0.9 only or both 0.9 and 1.0. The
		 * latter are so-called "Transitional Devices". For
		 * those devices, we want to use the 1.0 interface if
		 * possible.
		 *
		 * XXX Currently only on platforms that require 1.0
		 * XXX features, such as VIRTIO_F_ACCESS_PLATFORM.
		 */
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
		/* First, try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
		if (ret != 0) {
			aprint_error_dev(self,
			    "VirtIO 1.0 error = %d, falling back to 0.9\n",
			    ret);
			/* Fall back to 0.9. */
			ret = virtio_pci_attach_09(self, aux);
		}
#else
		ret = virtio_pci_attach_09(self, aux);
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}


static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	unsigned i;
	int r;

	r = config_detach_children(self, flags);
	if (r != 0)
		return r;

	/* Check that child never attached, or detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (sc->sc_version_1) {
		for (i = 0; i < __arraycount(psc->sc_bars_iot); i++) {
			if (psc->sc_bars_iosize[i] == 0)
				continue;
			bus_space_unmap(psc->sc_bars_iot[i],
			    psc->sc_bars_ioh[i], psc->sc_bars_iosize[i]);
			psc->sc_bars_iosize[i] = 0;
		}
	} else {
		if (psc->sc_iosize) {
			bus_space_unmap(psc->sc_iot, psc->sc_ioh,
			    psc->sc_mapped_iosize);
			psc->sc_iosize = 0;
		}
	}

	return 0;
}


static int
virtio_pci_attach_09(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}


static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %d\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
	}
	return ret;
}

/* v1.0 attach helper */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}
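
/*
 * Example use (cf. virtio_pci_attach_10() above): the notify capability
 * carries an extra notify_off_multiplier field after the generic
 * header, so the larger structure is passed in and filled whole:
 *
 *	struct virtio_pci_notify_cap notify;
 *	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
 *	    &notify, sizeof(notify)))
 *		return ENODEV;
 */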


/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/* only meaningful for v0.9; also called for 1.0, where it is a no-op */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = &psc->sc_sc;
	device_t self = sc->sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}
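
/*
 * In the 0.9 layout the device-specific config area follows the common
 * header, so its offset depends on whether the MSI-X vector registers
 * are present: sc_devcfg_offset is VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI
 * without MSI-X and VIRTIO_CONFIG_DEVICE_CONFIG_MSI with it, which is
 * why virtio_pci_alloc_interrupts() below re-runs this adjustment.
 */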

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}


static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * The 1.0 config space is little-endian by definition.  NB: "MAY" in
 * the text below refers to "independently" (i.e. the order of
 * accesses), not "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}
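
/*
 * Usage sketch: virtio_pci_setup_queue_10() below programs the 64-bit
 * ring addresses with this helper, one aligned 32-bit half at a time,
 * e.g.
 *
 *	virtio_pci_bus_space_write_8(iot, ioh,
 *	    VIRTIO_CONFIG1_QUEUE_DESC, addr);
 */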

static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (psc->sc_intr_pervq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
}

static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
#ifdef __NEED_VIRTIO_F_ACCESS_PLATFORM
	/* XXX This could use some work. */
	guest_features |= VIRTIO_F_ACCESS_PLATFORM;
#endif /* __NEED_VIRTIO_F_ACCESS_PLATFORM */
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)
	    bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}


/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t	   iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
	    VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	if (ret != vector) {
		aprint_debug_dev(sc->sc_dev, "%s: expected=%d, actual=%d\n",
		    __func__, vector, ret);
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (psc->sc_intr_pervq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		if (ret != vector) {
			aprint_debug_dev(sc->sc_dev, "%s[qid=%d]:"
			    " expected=%d, actual=%d\n",
			    __func__, qid, vector, ret);
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (psc->sc_intr_pervq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) &&
		    sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_intr_pervq = nmsix > 2 ? true : false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_intr_pervq = false;
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

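		/*
		 * Note: pci_get_capability(9) returns nonzero when the
		 * capability is present, so this explicitly disables an
		 * MSI-X capability that exists but is unused now that
		 * we have fallen back to INTx.
		 */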
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	if (!psc->sc_intr_pervq)
		CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ);
	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0)
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}

/*
 * Interrupt handler.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}