/*	$NetBSD: virtio_pci.c,v 1.36 2022/03/17 23:05:01 uwe Exp $	*/

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.36 2022/03/17 23:05:01 uwe Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/syslog.h>

#include <sys/device.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
#include <dev/pci/virtio_pcireg.h>

#define VIRTIO_PRIVATE
#include <dev/pci/virtiovar.h> /* XXX: move to non-pci */

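/*
 * VIRTIO_PCI_LOG: during normal autoconfiguration error messages go to
 * the console via aprint_error_dev(); when _use_log is set (e.g. when a
 * message is emitted from a re-initialization path after boot) they go
 * to the system log instead.
 */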
#define VIRTIO_PCI_LOG(_sc, _use_log, _fmt, _args...)	\
do {							\
	if ((_use_log)) {				\
		log(LOG_DEBUG, "%s: " _fmt,		\
		    device_xname((_sc)->sc_dev),	\
		    ##_args);				\
	} else {					\
		aprint_error_dev((_sc)->sc_dev,		\
		    _fmt, ##_args);			\
	}						\
} while (0)

static int	virtio_pci_match(device_t, cfdata_t, void *);
static void	virtio_pci_attach(device_t, device_t, void *);
static int	virtio_pci_rescan(device_t, const char *, const int *);
static int	virtio_pci_detach(device_t, int);


#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
struct virtio_pci_softc {
	struct virtio_softc	sc_sc;

	/* IO space */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_iosize;
	bus_size_t		sc_mapped_iosize;

	/* BARs */
	bus_space_tag_t		sc_bars_iot[NMAPREG];
	bus_space_handle_t	sc_bars_ioh[NMAPREG];
	bus_size_t		sc_bars_iosize[NMAPREG];

	/* notify space */
	bus_space_tag_t		sc_notify_iot;
	bus_space_handle_t	sc_notify_ioh;
	bus_size_t		sc_notify_iosize;
	uint32_t		sc_notify_off_multiplier;

	/* isr space */
	bus_space_tag_t		sc_isr_iot;
	bus_space_handle_t	sc_isr_ioh;
	bus_size_t		sc_isr_iosize;

	/* generic */
	struct pci_attach_args	sc_pa;
	pci_intr_handle_t	*sc_ihp;
	void			**sc_ihs;
	int			sc_ihs_num;
	int			sc_devcfg_offset;	/* for 0.9 */
};

static int	virtio_pci_attach_09(device_t, void *);
static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_09(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);

static int	virtio_pci_attach_10(device_t, void *);
static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
static void	virtio_pci_set_status_10(struct virtio_softc *, int);
static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);

static int	virtio_pci_alloc_interrupts(struct virtio_softc *);
static void	virtio_pci_free_interrupts(struct virtio_softc *);
static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
static int	virtio_pci_intr(void *arg);
static int	virtio_pci_msix_queue_intr(void *);
static int	virtio_pci_msix_config_intr(void *);
static int	virtio_pci_setup_interrupts_09(struct virtio_softc *, int);
static int	virtio_pci_setup_interrupts_10(struct virtio_softc *, int);
static int	virtio_pci_establish_msix_interrupts(struct virtio_softc *,
		    struct pci_attach_args *);
static int	virtio_pci_establish_intx_interrupt(struct virtio_softc *,
		    struct pci_attach_args *);
static bool	virtio_pci_msix_enabled(struct virtio_pci_softc *);

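/*
 * MSI-X vector layout: vector 0 handles configuration-change interrupts
 * and vector 1 handles queue interrupts.  When the child driver asks for
 * one vector per queue (sc_child_mq), queue N uses vector 1 + N instead
 * of sharing vector 1.
 */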
#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

/*
 * When using PCI-attached virtio on aarch64-eb under QEMU, the IO space
 * suddenly reads as BIG_ENDIAN where it should stay LITTLE_ENDIAN.  Data
 * read 1 byte at a time seems OK, but reading larger widths results in
 * swapped endianness.  This is most notable when reading 8 bytes, since
 * we can't use bus_space_{read,write}_8().
 */

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	BIG_ENDIAN	/* should be LITTLE_ENDIAN */
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#elif BYTE_ORDER == BIG_ENDIAN
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	BIG_ENDIAN
#	define STRUCT_ENDIAN_09	BIG_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#else /* little endian */
#	define READ_ENDIAN_09	LITTLE_ENDIAN
#	define READ_ENDIAN_10	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_09	LITTLE_ENDIAN
#	define STRUCT_ENDIAN_10	LITTLE_ENDIAN
#endif


CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,
	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_09,
};

static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,
	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.alloc_interrupts = virtio_pci_alloc_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
	.setup_interrupts = virtio_pci_setup_interrupts_10,
};

static int
virtio_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa;

	pa = (struct pci_attach_args *)aux;
	switch (PCI_VENDOR(pa->pa_id)) {
	case PCI_VENDOR_QUMRANET:
		/* Transitional devices MUST have a PCI Revision ID of 0. */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
		      PCI_PRODUCT(pa->pa_id)) &&
		     (PCI_PRODUCT(pa->pa_id) <=
		      PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
		    PCI_REVISION(pa->pa_class) == 0)
			return 1;
		/*
		 * Non-transitional devices SHOULD have a PCI Revision
		 * ID of 1 or higher.  Drivers MUST match any PCI
		 * Revision ID value.
		 */
		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
		      PCI_PRODUCT(pa->pa_id)) &&
		     (PCI_PRODUCT(pa->pa_id) <=
		      PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
		    /* XXX: TODO */
		    PCI_REVISION(pa->pa_class) == 1)
			return 1;
		break;
	}

	return 0;
}

static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;
	psc->sc_iot = pa->pa_iot;

	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, NULL, NULL);
	return;
}

/* ARGSUSED */
static int
virtio_pci_rescan(device_t self, const char *ifattr, const int *locs)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct virtio_attach_args va;

	if (sc->sc_child)	/* Child already attached? */
		return 0;

	memset(&va, 0, sizeof(va));
	va.sc_childdevid = sc->sc_childdevid;

	config_found(self, &va, NULL, CFARGS_NONE);

	if (virtio_attach_failed(sc))
		return 0;

	return 0;
}


static int
virtio_pci_detach(device_t self, int flags)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	int r;

	if (sc->sc_child != NULL) {
		r = config_detach(sc->sc_child, flags);
		if (r)
			return r;
	}

	/* Check that child detached properly */
	KASSERT(sc->sc_child == NULL);
	KASSERT(sc->sc_vqs == NULL);
	KASSERT(psc->sc_ihs_num == 0);

	if (psc->sc_iosize)
		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
		    psc->sc_mapped_iosize);
	psc->sc_iosize = 0;

	return 0;
}


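/*
 * Legacy (0.9) layout: a single I/O BAR holds the virtio header, with
 * the queue notify register and the ISR status register at fixed
 * offsets inside it.  The device-specific config area follows the
 * header; its offset depends on whether MSI-X is enabled (see
 * virtio_pci_adjust_config_region()).
 */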
static int
virtio_pci_attach_09(device_t self, void *aux)
//	struct virtio_pci_softc *psc, struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
//	pci_chipset_tag_t pc = pa->pa_pc;
//	pcitag_t tag = pa->pa_tag;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_bus_endian = READ_ENDIAN_09;
	sc->sc_struct_endian = STRUCT_ENDIAN_09;
	return 0;
}


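/*
 * Modern (1.0) layout: the common, notify, ISR and device-specific
 * config structures are located via vendor-specific PCI capabilities,
 * each naming a BAR, an offset and a length.  Only the BARs actually
 * referenced by a capability are mapped.
 */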
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j, ret = 0;

	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = j = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_BAR(i);
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u\n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_bus_endian = READ_ENDIAN_10;
	sc->sc_struct_endian = STRUCT_ENDIAN_10;
	return 0;

err:
	/* undo our pci_mapreg_map()s */
	for (i = 0; i < __arraycount(bars); i++) {
		if (psc->sc_bars_iosize[i] == 0)
			continue;
		bus_space_unmap(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    psc->sc_bars_iosize[i]);
	}
	return ret;
}

/* v1.0 attach helper */
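/*
 * Walk the PCI capability list looking for a vendor-specific capability
 * whose cfg_type matches, and copy it into buf with the multi-byte
 * fields converted to host endianness.
 */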
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}


/* -------------------------------------
 * Version 0.9 support
 * -------------------------------------*/

static void
virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
}

/*
 * Only meaningful for v0.9 (a no-op for v1.0), but called in both
 * cases: the legacy device config area starts right after the
 * MSI-X-dependent part of the header, so it must be remapped whenever
 * the interrupt type changes.
 */
static int
virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
{
	struct virtio_softc * const sc = (struct virtio_softc *)psc;
	device_t self = psc->sc_sc.sc_dev;

	if (psc->sc_sc.sc_version_1)
		return 0;

	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
	sc->sc_devcfg_iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
	    &sc->sc_devcfg_ioh)) {
		aprint_error_dev(self, "can't map config i/o space\n");
		return EIO;
	}

	return 0;
}

static uint16_t
virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SIZE);
}

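/*
 * Legacy queue setup: select the queue, then hand the device the ring's
 * guest page frame number.  The 0.9 interface only takes a single
 * page-aligned address for the whole ring; writing 0 marks the queue
 * as unused.
 */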
static void
virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_09(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int old = 0;

	if (status != 0) {
		old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
		    VIRTIO_CONFIG_DEVICE_STATUS);
	}
	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
}

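/*
 * Legacy feature negotiation: features are a single 32-bit word, so
 * any bits above 31 in the guest feature mask are silently dropped.
 */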
static void
virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	uint32_t r;

	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_DEVICE_FEATURES);

	r &= guest_features;

	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_GUEST_FEATURES, r);

	sc->sc_active_features = r;
}

/* -------------------------------------
 * Version 1.0 support
 * -------------------------------------*/

static void
virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
	    psc->sc_notify_off_multiplier;

	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
}


static uint16_t
virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
}

/*
 * By definition little endian only in v1.0.  NB: "MAY" in the text
 * below refers to "independently" (i.e. the order of accesses) not
 * "32-bit" (which is restricted by the earlier "MUST").
 *
 * 4.1.3.1 Driver Requirements: PCI Device Layout
 *
 * For device configuration access, the driver MUST use ... 32-bit
 * wide and aligned accesses for ... 64-bit wide fields.  For 64-bit
 * fields, the driver MAY access each of the high and low 32-bit parts
 * of the field independently.
 */
static __inline void
virtio_pci_bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}

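/*
 * Modern queue setup: select the queue, program the descriptor,
 * available and used ring addresses separately, enable the queue, and
 * remember its notify offset for virtio_pci_kick_10().  Passing
 * addr == 0 disables the queue and clears the ring addresses.
 */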
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		virtio_pci_bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}

static void
virtio_pci_set_status_10(struct virtio_softc *sc, int status)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int old = 0;

	if (status)
		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
}

static void
virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	uint64_t host, negotiated, device_status;

	guest_features |= VIRTIO_F_VERSION_1;
	/* notify on empty is 0.9 only */
	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->sc_active_features = 0;

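	/*
	 * Feature bits are exposed as two 32-bit windows: write the
	 * window index to the *_FEATURE_SELECT register, then access
	 * the corresponding half through the *_FEATURE register.
	 */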
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
	host |= (uint64_t)
	    bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;

	negotiated = host & guest_features;

	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated & 0xffffffff);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
	    negotiated >> 32);
	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);

	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
		aprint_error_dev(self, "feature negotiation failed\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
		aprint_error_dev(self, "host rejected version 1\n");
		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
		    VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
		return;
	}

	sc->sc_active_features = negotiated;
	return;
}


/* -------------------------------------
 * Generic PCI interrupt code
 * -------------------------------------*/

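/*
 * Program the device's MSI-X vector registers.  The device echoes the
 * vector back on read if it could allocate the required resources, so
 * each write is read back and a mismatch is treated as failure.
 */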
static int
virtio_pci_setup_interrupts_10(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
	    VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (sc->sc_child_mq)
			vector += qid;
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

static int
virtio_pci_setup_interrupts_09(struct virtio_softc *sc, int reinit)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int offset, vector, ret, qid;

	if (!virtio_pci_msix_enabled(psc))
		return 0;

	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector) {
		VIRTIO_PCI_LOG(sc, reinit,
		    "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		if (sc->sc_child_mq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector) {
			VIRTIO_PCI_LOG(sc, reinit, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}

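/*
 * Establish the MSI-X handlers: the config-change vector first, then
 * either one handler per queue (sc_child_mq, with interrupts
 * distributed across CPUs) or a single handler shared by all queues.
 */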
static int
virtio_pci_establish_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}

static int
virtio_pci_establish_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}

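/*
 * Allocate interrupts: prefer MSI-X with either one vector per queue
 * plus the config vector (sc_child_mq) or just two vectors (config +
 * all queues); if MSI-X can't be established, fall back to a single
 * INTx line.  The legacy device config region is remapped afterwards
 * because its offset depends on the interrupt type chosen.
 */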
static int
virtio_pci_alloc_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			sc->sc_child_mq = false;
		}

		if (sc->sc_child_mq == false) {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_establish_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_establish_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * pci_get_capability() returns nonzero iff the MSI-X
		 * capability is present; in that case make sure MSI-X
		 * stays disabled while we run on INTx.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	return 0;
}

static void
virtio_pci_free_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;

	for (int i = 0; i < psc->sc_ihs_num; i++) {
		if (psc->sc_ihs[i] == NULL)
			continue;
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
		psc->sc_ihs[i] = NULL;
	}

	if (psc->sc_ihs_num > 0)
		pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);

	if (psc->sc_ihs != NULL) {
		kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
		psc->sc_ihs = NULL;
	}
	psc->sc_ihs_num = 0;
}

static bool
virtio_pci_msix_enabled(struct virtio_pci_softc *psc)
{
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX)
		return true;

	return false;
}

/*
 * Interrupt handler (INTx).  Reading the ISR status register returns
 * the pending cause bits and acknowledges the interrupt at the same
 * time.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_queue_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_intrhand != NULL) {
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int
virtio_pci_msix_config_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	int r = 0;

	if (sc->sc_config_change != NULL)
		r = (sc->sc_config_change)(sc);
	return r;
}

MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
virtio_pci_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_virtio_pci,
		    cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}