virtio_pci.c revision 1.19 1 /* $NetBSD: virtio_pci.c,v 1.19 2021/01/23 20:00:19 christos Exp $ */
2
3 /*
4 * Copyright (c) 2020 The NetBSD Foundation, Inc.
5 * Copyright (c) 2012 Stefan Fritsch.
6 * Copyright (c) 2010 Minoura Makoto.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.19 2021/01/23 20:00:19 christos Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kmem.h>
36 #include <sys/module.h>
37 #include <sys/endian.h>
38 #include <sys/interrupt.h>
39
40 #include <sys/device.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45
46 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
47 #include <dev/pci/virtio_pcireg.h>
48
49 #define VIRTIO_PRIVATE
50 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
51
52
53 static int virtio_pci_match(device_t, cfdata_t, void *);
54 static void virtio_pci_attach(device_t, device_t, void *);
55 static int virtio_pci_rescan(device_t, const char *, const int *);
56 static int virtio_pci_detach(device_t, int);
57
/*
 * PCI-transport specific softc, wrapping the bus-independent virtio
 * softc.  sc_sc must stay the first member: the accessor functions
 * below cast struct virtio_softc * directly to struct virtio_pci_softc *.
 */
struct virtio_pci_softc {
	struct virtio_softc sc_sc;

	/* IO space (common/generic registers) */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_size_t sc_iosize;		/* size of the common region */
	bus_size_t sc_mapped_iosize;	/* size actually mapped (for unmap) */

	/* BARs mapped for the 1.0 layout; indexed by mapping order, not BAR # */
	bus_space_tag_t sc_bars_iot[4];
	bus_space_handle_t sc_bars_ioh[4];
	bus_size_t sc_bars_iosize[4];

	/* notify space (queue kick registers) */
	bus_space_tag_t sc_notify_iot;
	bus_space_handle_t sc_notify_ioh;
	bus_size_t sc_notify_iosize;
	/* per-queue notify offset scale, from the 1.0 notify capability */
	uint32_t sc_notify_off_multiplier;

	/* isr space (interrupt status byte) */
	bus_space_tag_t sc_isr_iot;
	bus_space_handle_t sc_isr_ioh;
	bus_size_t sc_isr_iosize;

	/* generic */
	struct pci_attach_args sc_pa;	/* copy kept for deferred setup */
	pci_intr_handle_t *sc_ihp;
	void **sc_ihs;
	int sc_ihs_num;			/* >1 implies per-queue MSI-X vectors */
	int sc_devcfg_offset;	/* for 0.9: device config offset (MSI-X shifts it) */
};
90
91 static int virtio_pci_attach_09(device_t, void *);
92 static void virtio_pci_kick_09(struct virtio_softc *, uint16_t);
93 static uint16_t virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
94 static void virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
95 static void virtio_pci_set_status_09(struct virtio_softc *, int);
96 static void virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);
97
98 static int virtio_pci_attach_10(device_t, void *);
99 static void virtio_pci_kick_10(struct virtio_softc *, uint16_t);
100 static uint16_t virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
101 static void virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
102 static void virtio_pci_set_status_10(struct virtio_softc *, int);
103 static void virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
104 static int virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);
105
106 static uint8_t virtio_pci_read_device_config_1(struct virtio_softc *, int);
107 static uint16_t virtio_pci_read_device_config_2(struct virtio_softc *, int);
108 static uint32_t virtio_pci_read_device_config_4(struct virtio_softc *, int);
109 static uint64_t virtio_pci_read_device_config_8(struct virtio_softc *, int);
110 static void virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
111 static void virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
112 static void virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
113 static void virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
114
115 static int virtio_pci_setup_interrupts(struct virtio_softc *);
116 static void virtio_pci_free_interrupts(struct virtio_softc *);
117 static int virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
118 static int virtio_pci_intr(void *arg);
119 static int virtio_pci_msix_queue_intr(void *);
120 static int virtio_pci_msix_config_intr(void *);
121 static int virtio_pci_setup_msix_vectors_09(struct virtio_softc *);
122 static int virtio_pci_setup_msix_vectors_10(struct virtio_softc *);
123 static int virtio_pci_setup_msix_interrupts(struct virtio_softc *,
124 struct pci_attach_args *);
125 static int virtio_pci_setup_intx_interrupt(struct virtio_softc *,
126 struct pci_attach_args *);
127
128 #define VIRTIO_MSIX_CONFIG_VECTOR_INDEX 0
129 #define VIRTIO_MSIX_QUEUE_VECTOR_INDEX 1
130
131 #if 0
132 /* we use the legacy virtio spec, so the PCI registers are host native
133 * byte order, not PCI (i.e. LE) byte order */
134 #if BYTE_ORDER == BIG_ENDIAN
135 #define REG_HI_OFF 0
136 #define REG_LO_OFF 4
137 #ifndef __BUS_SPACE_HAS_STREAM_METHODS
138 #define bus_space_read_stream_1 bus_space_read_1
139 #define bus_space_write_stream_1 bus_space_write_1
140 static inline uint16_t
141 bus_space_read_stream_2(bus_space_tag_t t, bus_space_handle_t h,
142 bus_size_t o)
143 {
144 return le16toh(bus_space_read_2(t, h, o));
145 }
146 static inline void
147 bus_space_write_stream_2(bus_space_tag_t t, bus_space_handle_t h,
148 bus_size_t o, uint16_t v)
149 {
150 bus_space_write_2(t, h, o, htole16(v));
151 }
152 static inline uint32_t
153 bus_space_read_stream_4(bus_space_tag_t t, bus_space_handle_t h,
154 bus_size_t o)
155 {
156 return le32toh(bus_space_read_4(t, h, o));
157 }
158 static inline void
159 bus_space_write_stream_4(bus_space_tag_t t, bus_space_handle_t h,
160 bus_size_t o, uint32_t v)
161 {
162 bus_space_write_4(t, h, o, htole32(v));
163 }
164 #endif
165 #else
166 #define REG_HI_OFF 4
167 #define REG_LO_OFF 0
168 #ifndef __BUS_SPACE_HAS_STREAM_METHODS
169 #define bus_space_read_stream_1 bus_space_read_1
170 #define bus_space_read_stream_2 bus_space_read_2
171 #define bus_space_read_stream_4 bus_space_read_4
172 #define bus_space_write_stream_1 bus_space_write_1
173 #define bus_space_write_stream_2 bus_space_write_2
174 #define bus_space_write_stream_4 bus_space_write_4
175 #endif
176 #endif
177 #endif
178
179
180 #if BYTE_ORDER == LITTLE_ENDIAN
181 # define VIODEVRW_SWAP_09 false
182 # define VIODEVRW_SWAP_10 false
183 #else /* big endian */
184 # define VIODEVRW_SWAP_09 false
185 # define VIODEVRW_SWAP_10 true
186 #endif
187
188
/* Autoconfiguration glue: match/attach/detach/rescan for virtio at pci. */
CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);
192
/*
 * Operations vector for legacy (virtio 0.9) devices.  The generic
 * virtio layer calls through these to reach the transport registers.
 */
static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,

	/* device-config accessors are shared between 0.9 and 1.0;
	 * sc_devcfg_swap selects the byte-order handling */
	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
	.read_dev_cfg_8 = virtio_pci_read_device_config_8,
	.write_dev_cfg_1 = virtio_pci_write_device_config_1,
	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
	.write_dev_cfg_8 = virtio_pci_write_device_config_8,

	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.setup_interrupts = virtio_pci_setup_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
};
212
/*
 * Operations vector for modern (virtio 1.0) devices.  Interrupt setup
 * and the device-config accessors are shared with the 0.9 vector.
 */
static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,

	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
	.read_dev_cfg_8 = virtio_pci_read_device_config_8,
	.write_dev_cfg_1 = virtio_pci_write_device_config_1,
	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
	.write_dev_cfg_8 = virtio_pci_write_device_config_8,

	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.setup_interrupts = virtio_pci_setup_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
};
232
233 static int
234 virtio_pci_match(device_t parent, cfdata_t match, void *aux)
235 {
236 struct pci_attach_args *pa;
237
238 pa = (struct pci_attach_args *)aux;
239 switch (PCI_VENDOR(pa->pa_id)) {
240 case PCI_VENDOR_QUMRANET:
241 if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
242 PCI_PRODUCT(pa->pa_id)) &&
243 (PCI_PRODUCT(pa->pa_id) <=
244 PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
245 PCI_REVISION(pa->pa_class) == 0)
246 return 1;
247 if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
248 PCI_PRODUCT(pa->pa_id)) &&
249 (PCI_PRODUCT(pa->pa_id) <=
250 PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
251 PCI_REVISION(pa->pa_class) == 1)
252 return 1;
253 break;
254 }
255
256 return 0;
257 }
258
/*
 * Autoconf attach.  Determine the child device id and the register
 * layout from the PCI revision, map the registers (0.9 and/or 1.0),
 * reset the device, announce ACK/DRIVER status, and attach the child
 * virtio device driver via rescan.
 */
static void
virtio_pci_attach(device_t parent, device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct virtio_softc * const sc = &psc->sc_sc;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	int revision;
	int ret;
	pcireg_t id;
	pcireg_t csr;

	revision = PCI_REVISION(pa->pa_class);
	switch (revision) {
	case 0:
		/* subsystem ID shows what I am */
		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
		break;
	case 1:
		/* pci product number shows what I am */
		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
		break;
	default:
		aprint_normal(": unknown revision 0x%02x; giving up\n",
		    revision);
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");
	virtio_print_device_type(self, id, revision);

	/* enable bus mastering and i/o space before touching registers */
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_dev = self;
	psc->sc_pa = *pa;	/* keep a copy for interrupt setup later */
	psc->sc_iot = pa->pa_iot;

	/* prefer the 64-bit DMA tag when the bus supports it */
	sc->sc_dmat = pa->pa_dmat;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;

	/* attach is dependent on revision */
	ret = 0;
	if (revision == 1) {
		/* try to attach 1.0 */
		ret = virtio_pci_attach_10(self, aux);
	}
	if (ret == 0 && revision == 0) {
		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
		ret = virtio_pci_attach_09(self, aux);
	}
	if (ret) {
		aprint_error_dev(self, "cannot attach (%d)\n", ret);
		return;
	}
	KASSERT(sc->sc_ops);

	/* preset config region (assumes no MSI-X until interrupts are set up) */
	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	if (virtio_pci_adjust_config_region(psc))
		return;

	/* generic */
	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	sc->sc_childdevid = id;
	sc->sc_child = NULL;
	virtio_pci_rescan(self, "virtio", 0);
	return;
}
335
336 /* ARGSUSED */
337 static int
338 virtio_pci_rescan(device_t self, const char *attr, const int *scan_flags)
339 {
340 struct virtio_pci_softc * const psc = device_private(self);
341 struct virtio_softc * const sc = &psc->sc_sc;
342 struct virtio_attach_args va;
343
344 if (sc->sc_child) /* Child already attached? */
345 return 0;
346
347 memset(&va, 0, sizeof(va));
348 va.sc_childdevid = sc->sc_childdevid;
349
350 config_found_ia(self, attr, &va, NULL);
351
352 if (virtio_attach_failed(sc))
353 return 0;
354
355 return 0;
356 }
357
358
359 static int
360 virtio_pci_detach(device_t self, int flags)
361 {
362 struct virtio_pci_softc * const psc = device_private(self);
363 struct virtio_softc * const sc = &psc->sc_sc;
364 int r;
365
366 if (sc->sc_child != NULL) {
367 r = config_detach(sc->sc_child, flags);
368 if (r)
369 return r;
370 }
371
372 /* Check that child detached properly */
373 KASSERT(sc->sc_child == NULL);
374 KASSERT(sc->sc_vqs == NULL);
375 KASSERT(psc->sc_ihs_num == 0);
376
377 if (psc->sc_iosize)
378 bus_space_unmap(psc->sc_iot, psc->sc_ioh,
379 psc->sc_mapped_iosize);
380 psc->sc_iosize = 0;
381
382 return 0;
383 }
384
385
/*
 * Map the legacy (virtio 0.9) register layout: one i/o BAR containing
 * the generic registers, the 2-byte queue notify register, the 1-byte
 * ISR status register and, after those, the device-specific config
 * (mapped separately by virtio_pci_adjust_config_region()).
 * Returns 0 on success, EIO on mapping failure.
 */
static int
virtio_pci_attach_09(device_t self, void *aux)
//struct virtio_pci_softc *psc, struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
//	pci_chipset_tag_t pc = pa->pa_pc;
//	pcitag_t tag = pa->pa_tag;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space: notify register inside the same BAR */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space: single status byte */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_devcfg_swap = VIODEVRW_SWAP_09;
	return 0;
}
427
428
/* number of BAR registers in the PCI config header */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))
/*
 * Map the modern (virtio 1.0) register layout.  The common, notify,
 * ISR and (optional) device-specific regions are each described by a
 * vendor-specific PCI capability and may live in any BAR at any
 * offset.  Returns 0 on success, ENODEV if a mandatory capability is
 * missing, EIO on mapping failure.
 */
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	/* common, notify and isr capabilities are mandatory */
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	/* the device-specific config region is optional */
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit? Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		/* map each BAR large enough for its largest region */
		if (bars[bar] < len)
			bars[bar] = len;
	}

	for (i = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u \n", i);
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "bar[%d]: iot %p, size 0x%" PRIxBUSSIZE "\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		/* remember which mapping slot (j) serves BAR number i */
		bars_idx[i] = j;
		j++;
	}

	/* notify region: per-queue kick offsets scaled by the multiplier */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	/* device-specific config region, if the device offers one */
	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	/* interrupt status region */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	/* common configuration region */
	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_devcfg_swap = VIODEVRW_SWAP_10;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}
560
/* v1.0 attach helper */
/*
 * Walk the PCI vendor-specific capability list looking for a virtio
 * capability of the given cfg_type, copying it into buf.  Extra bytes
 * beyond the base struct (e.g. the notify capability's multiplier) are
 * copied too, up to buflen.  Returns 0 on success, ENOENT if no such
 * capability exists, ERANGE if buf is too small.
 */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		/* read the base capability (16 bytes) and check its type */
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	/* larger capabilities carry extra trailing fields; copy them too */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	/*
	 * NOTE(review): pci_conf_read() returns host byte order, so the
	 * le32toh() above plus these per-field le32toh() are no-ops on LE
	 * hosts; confirm the intended behavior on big-endian machines.
	 */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}
608
609
610 /* -------------------------------------
611 * Version 0.9 support
612 * -------------------------------------*/
613
614 static void
615 virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
616 {
617 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
618
619 bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
620 }
621
622 /* only applicable for v 0.9 but also called for 1.0 */
623 static int
624 virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
625 {
626 struct virtio_softc * const sc = (struct virtio_softc *) psc;
627 device_t self = psc->sc_sc.sc_dev;
628
629 if (psc->sc_sc.sc_version_1)
630 return 0;
631
632 sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
633 sc->sc_devcfg_iot = psc->sc_iot;
634 if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
635 psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
636 &sc->sc_devcfg_ioh)) {
637 aprint_error_dev(self, "can't map config i/o space\n");
638 return EIO;
639 }
640
641 return 0;
642 }
643
644 static uint16_t
645 virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
646 {
647 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
648
649 bus_space_write_2(psc->sc_iot, psc->sc_ioh,
650 VIRTIO_CONFIG_QUEUE_SELECT, idx);
651 return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
652 VIRTIO_CONFIG_QUEUE_SIZE);
653 }
654
655 static void
656 virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
657 {
658 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
659
660 bus_space_write_2(psc->sc_iot, psc->sc_ioh,
661 VIRTIO_CONFIG_QUEUE_SELECT, idx);
662 bus_space_write_4(psc->sc_iot, psc->sc_ioh,
663 VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
664
665 if (psc->sc_ihs_num > 1) {
666 int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
667 if (sc->sc_child_mq)
668 vec += idx;
669 bus_space_write_2(psc->sc_iot, psc->sc_ioh,
670 VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
671 }
672 }
673
674 static void
675 virtio_pci_set_status_09(struct virtio_softc *sc, int status)
676 {
677 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
678 int old = 0;
679
680 if (status != 0) {
681 old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
682 VIRTIO_CONFIG_DEVICE_STATUS);
683 }
684 bus_space_write_1(psc->sc_iot, psc->sc_ioh,
685 VIRTIO_CONFIG_DEVICE_STATUS, status|old);
686 }
687
688 static void
689 virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
690 {
691 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
692 uint32_t r;
693
694 r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
695 VIRTIO_CONFIG_DEVICE_FEATURES);
696
697 r &= guest_features;
698
699 bus_space_write_4(psc->sc_iot, psc->sc_ioh,
700 VIRTIO_CONFIG_GUEST_FEATURES, r);
701
702 sc->sc_active_features = r;
703 }
704
705 /* -------------------------------------
706 * Version 1.0 support
707 * -------------------------------------*/
708
709 static void
710 virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
711 {
712 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
713 unsigned offset = sc->sc_vqs[idx].vq_notify_off *
714 psc->sc_notify_off_multiplier;
715
716 bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
717 }
718
719
720 static uint16_t
721 virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
722 {
723 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
724 bus_space_tag_t iot = psc->sc_iot;
725 bus_space_handle_t ioh = psc->sc_ioh;
726
727 bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
728 return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
729 }
730
/*
 * By definition little endian only in v1.0 and 8 byters are allowed to be
 * written as two 4 byters
 */
#ifndef __HAVE_BUS_SPACE_8
/* Fallback for ports without a native 8-byte bus_space write:
 * split the value into two 4-byte writes; the word order follows
 * the port's quad-word layout (_QUAD_HIGHWORD). */
static __inline void
bus_space_write_8(bus_space_tag_t iot, bus_space_handle_t ioh,
    bus_size_t offset, uint64_t value)
{
#if _QUAD_HIGHWORD
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_LO32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_HI32(value));
#else
	bus_space_write_4(iot, ioh, offset, BUS_ADDR_HI32(value));
	bus_space_write_4(iot, ioh, offset + 4, BUS_ADDR_LO32(value));
#endif
}
#endif
749
/*
 * 1.0 queue setup.  addr == 0 disables the queue and clears its ring
 * addresses; otherwise program the descriptor/avail/used ring
 * addresses, enable the queue, and latch its notify offset for
 * virtio_pci_kick_10().  With MSI-X active, also bind the queue to
 * its interrupt vector.
 */
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		/* tear down: disable before clearing the ring addresses */
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_DESC, 0);
		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		/* avail/used rings live at fixed offsets from the descs */
		bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	/* more than one handler established means MSI-X is in use */
	if (psc->sc_ihs_num > 1) {
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}
786
787 static void
788 virtio_pci_set_status_10(struct virtio_softc *sc, int status)
789 {
790 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
791 bus_space_tag_t iot = psc->sc_iot;
792 bus_space_handle_t ioh = psc->sc_ioh;
793 int old = 0;
794
795 if (status)
796 old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
797 bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
798 }
799
800 void
801 virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
802 {
803 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
804 device_t self = sc->sc_dev;
805 bus_space_tag_t iot = psc->sc_iot;
806 bus_space_handle_t ioh = psc->sc_ioh;
807 uint64_t host, negotiated, device_status;
808
809 guest_features |= VIRTIO_F_VERSION_1;
810 /* notify on empty is 0.9 only */
811 guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
812 sc->sc_active_features = 0;
813
814 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
815 host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
816 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
817 host |= (uint64_t)
818 bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
819
820 negotiated = host & guest_features;
821
822 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
823 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
824 negotiated & 0xffffffff);
825 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
826 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
827 negotiated >> 32);
828 virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
829
830 device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
831 if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
832 aprint_error_dev(self, "feature negotiation failed\n");
833 bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
834 VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
835 return;
836 }
837
838 if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
839 aprint_error_dev(self, "host rejected version 1\n");
840 bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
841 VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
842 return;
843 }
844
845 sc->sc_active_features = negotiated;
846 return;
847 }
848
849 /* -------------------------------------
850 * Read/write device config code
851 * -------------------------------------*/
852
853 static uint8_t
854 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
855 {
856 bus_space_tag_t iot = vsc->sc_devcfg_iot;
857 bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
858
859 return bus_space_read_1(iot, ioh, index);
860 }
861
/*
 * Read a 16-bit device-config value, honoring sc_devcfg_swap (set per
 * virtio version: 0.9 config is host order, 1.0 config is LE).
 */
static uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint16_t val;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* NOTE(review): aarch64eb uses the plain (non-stream) read and
	 * skips the swap — presumably matching that port's bus_space
	 * semantics; confirm against the arch implementation. */
	val = bus_space_read_2(iot, ioh, index);
	return val;
#else
	val = bus_space_read_stream_2(iot, ioh, index);
	if (vsc->sc_devcfg_swap)
		return bswap16(val);
	return val;
#endif
}
879
/*
 * Read a 32-bit device-config value, honoring sc_devcfg_swap (set per
 * virtio version: 0.9 config is host order, 1.0 config is LE).
 */
static uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint32_t val;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* NOTE(review): aarch64eb uses the plain (non-stream) read and
	 * skips the swap — presumably matching that port's bus_space
	 * semantics; confirm against the arch implementation. */
	val = bus_space_read_4(iot, ioh, index);
	return val;
#else
	val = bus_space_read_stream_4(iot, ioh, index);
	if (vsc->sc_devcfg_swap)
		return bswap32(val);
	return val;
#endif
}
897
/*
 * Read a 64-bit device-config value as two 32-bit halves, combining
 * them according to host endianness and sc_devcfg_swap.  The half
 * ordering differs per branch; see the per-arch notes below.
 */
static uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint64_t val, val_h, val_l;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* aarch64eb: the swap flag decides which half is the low word */
	if (vsc->sc_devcfg_swap) {
		val_l = bus_space_read_4(iot, ioh, index);
		val_h = bus_space_read_4(iot, ioh, index + 4);
	} else {
		val_h = bus_space_read_4(iot, ioh, index);
		val_l = bus_space_read_4(iot, ioh, index + 4);
	}
	val = val_h << 32;
	val |= val_l;
	return val;
#elif BYTE_ORDER == BIG_ENDIAN
	/* generic big-endian: first word is the high half; swap the
	 * whole 64-bit value afterwards if required */
	val_h = bus_space_read_stream_4(iot, ioh, index);
	val_l = bus_space_read_stream_4(iot, ioh, index + 4);
	val = val_h << 32;
	val |= val_l;
	if (vsc->sc_devcfg_swap)
		return bswap64(val);
	return val;
#else
	/* little-endian: low word first; no swap needed */
	val_l = bus_space_read_4(iot, ioh, index);
	val_h = bus_space_read_4(iot, ioh, index + 4);
	val = val_h << 32;
	val |= val_l;

	return val;
#endif
}
933
934 static void
935 virtio_pci_write_device_config_1(struct virtio_softc *vsc,
936 int index, uint8_t value)
937 {
938 bus_space_tag_t iot = vsc->sc_devcfg_iot;
939 bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
940
941 bus_space_write_1(iot, ioh, index, value);
942 }
943
/*
 * Write a 16-bit device-config value, honoring sc_devcfg_swap
 * (mirror of virtio_pci_read_device_config_2).
 */
static void
virtio_pci_write_device_config_2(struct virtio_softc *vsc,
    int index, uint16_t value)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* NOTE(review): aarch64eb uses the plain (non-stream) write and
	 * skips the swap; confirm against the arch bus_space code. */
	bus_space_write_2(iot, ioh, index, value);
#else
	if (vsc->sc_devcfg_swap)
		value = bswap16(value);
	bus_space_write_stream_2(iot, ioh, index, value);
#endif
}
959
/*
 * Write a 32-bit device-config value, honoring sc_devcfg_swap
 * (mirror of virtio_pci_read_device_config_4).
 */
static void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    int index, uint32_t value)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* NOTE(review): aarch64eb uses the plain (non-stream) write and
	 * skips the swap; confirm against the arch bus_space code. */
	bus_space_write_4(iot, ioh, index, value);
#else
	if (vsc->sc_devcfg_swap)
		value = bswap32(value);
	bus_space_write_stream_4(iot, ioh, index, value);
#endif
}
975
/*
 * Write a 64-bit device-config value as two 32-bit halves, in the
 * order mandated by host endianness and sc_devcfg_swap (mirror of
 * virtio_pci_read_device_config_8).
 */
static void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    int index, uint64_t value)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint64_t val_h, val_l;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* aarch64eb: the swap flag decides which half goes first */
	val_l = value & 0xffffffff;
	val_h = value >> 32;
	if (vsc->sc_devcfg_swap) {
		bus_space_write_4(iot, ioh, index, val_l);
		bus_space_write_4(iot, ioh, index + 4, val_h);
	} else {
		bus_space_write_4(iot, ioh, index, val_h);
		bus_space_write_4(iot, ioh, index + 4, val_l);
	}
#elif BYTE_ORDER == BIG_ENDIAN
	/* generic big-endian: swap the whole value first if required,
	 * then store the high half at the lower offset */
	if (vsc->sc_devcfg_swap)
		value = bswap64(value);
	val_l = value & 0xffffffff;
	val_h = value >> 32;

	bus_space_write_stream_4(iot, ioh, index, val_h);
	bus_space_write_stream_4(iot, ioh, index + 4, val_l);
#else
	/* little-endian: low half first; no swap needed */
	val_l = value & 0xffffffff;
	val_h = value >> 32;
	bus_space_write_stream_4(iot, ioh, index, val_l);
	bus_space_write_stream_4(iot, ioh, index + 4, val_h);
#endif
}
1009
1010 /* -------------------------------------
1011 * Generic PCI interrupt code
1012 * -------------------------------------*/
1013
1014 static int
1015 virtio_pci_setup_msix_vectors_10(struct virtio_softc *sc)
1016 {
1017 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1018 device_t self = sc->sc_dev;
1019 bus_space_tag_t iot = psc->sc_iot;
1020 bus_space_handle_t ioh = psc->sc_ioh;
1021 int vector, ret, qid;
1022
1023 vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
1024 bus_space_write_2(iot, ioh,
1025 VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
1026 ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
1027 if (ret != vector) {
1028 aprint_error_dev(self, "can't set config msix vector\n");
1029 return -1;
1030 }
1031
1032 for (qid = 0; qid < sc->sc_nvqs; qid++) {
1033 vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
1034
1035 if (sc->sc_child_mq)
1036 vector += qid;
1037 bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
1038 bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
1039 vector);
1040 ret = bus_space_read_2(iot, ioh,
1041 VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
1042 if (ret != vector) {
1043 aprint_error_dev(self, "can't set queue %d "
1044 "msix vector\n", qid);
1045 return -1;
1046 }
1047 }
1048
1049 return 0;
1050 }
1051
/*
 * Program the MSI-X vector indices into a legacy (0.9) virtio device,
 * using the legacy config-space register offsets.  As with the 1.0
 * variant, each write is read back to verify the device accepted the
 * vector; a mismatch means the device rejected it.
 * Returns 0 on success, -1 on failure.
 */
static int
virtio_pci_setup_msix_vectors_09(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	int offset, vector, ret, qid;

	/* Config-change notifications go to the config vector. */
	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector) {
		aprint_error_dev(self, "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		/* Select the queue, then assign its vector. */
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		/* Per-queue vectors only when the child wants multiqueue. */
		if (sc->sc_child_mq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector) {
			aprint_error_dev(self, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
1094
/*
 * Establish all MSI-X interrupt handlers: one handler for config
 * changes, then either one handler per virtqueue (sc_child_mq) or a
 * single shared handler for all queues.  After the handlers are in
 * place the vector indices are programmed into the device (1.0 or
 * legacy register layout).  In the multiqueue case, queue interrupts
 * are also distributed across CPUs.  On any failure every handler
 * established so far is torn down.  Returns 0 on success, -1 on error.
 */
static int
virtio_pci_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;
	int ret;

	/* Establish the config-change handler. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	/* Establish the queue handler(s). */
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		/* One vector per queue, using the child's per-vq handler. */
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		/* One shared vector for all queues. */
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	/* Tell the device which vector goes with which event. */
	if (sc->sc_version_1) {
		ret = virtio_pci_setup_msix_vectors_10(sc);
	} else {
		ret = virtio_pci_setup_msix_vectors_09(sc);
	}
	if (ret) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	/* Report where each interrupt lands. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			/* Pair adjacent queues (e.g. rx/tx) on one CPU. */
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			/* Best effort: fall back to the default CPU quietly. */
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	/* Unwind: disestablish whatever was established above. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}
1227
1228 static int
1229 virtio_pci_setup_intx_interrupt(struct virtio_softc *sc,
1230 struct pci_attach_args *pa)
1231 {
1232 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1233 device_t self = sc->sc_dev;
1234 pci_chipset_tag_t pc = pa->pa_pc;
1235 char intrbuf[PCI_INTRSTR_LEN];
1236 char const *intrstr;
1237
1238 if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
1239 pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);
1240
1241 psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
1242 sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
1243 if (psc->sc_ihs[0] == NULL) {
1244 aprint_error_dev(self, "couldn't establish INTx\n");
1245 return -1;
1246 }
1247
1248 intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
1249 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
1250
1251 return 0;
1252 }
1253
1254 static int
1255 virtio_pci_setup_interrupts(struct virtio_softc *sc)
1256 {
1257 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1258 device_t self = sc->sc_dev;
1259 pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
1260 pcitag_t tag = psc->sc_pa.pa_tag;
1261 int error;
1262 int nmsix;
1263 int off;
1264 int counts[PCI_INTR_TYPE_SIZE];
1265 pci_intr_type_t max_type;
1266 pcireg_t ctl;
1267
1268 nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
1269 aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);
1270
1271 /* We need at least two: one for config and the other for queues */
1272 if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
1273 /* Try INTx only */
1274 max_type = PCI_INTR_TYPE_INTX;
1275 counts[PCI_INTR_TYPE_INTX] = 1;
1276 } else {
1277 /* Try MSI-X first and INTx second */
1278 if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
1279 nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
1280 } else {
1281 sc->sc_child_mq = false;
1282 }
1283
1284 if (sc->sc_child_mq == false) {
1285 nmsix = 2;
1286 }
1287
1288 max_type = PCI_INTR_TYPE_MSIX;
1289 counts[PCI_INTR_TYPE_MSIX] = nmsix;
1290 counts[PCI_INTR_TYPE_MSI] = 0;
1291 counts[PCI_INTR_TYPE_INTX] = 1;
1292 }
1293
1294 retry:
1295 error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
1296 if (error != 0) {
1297 aprint_error_dev(self, "couldn't map interrupt\n");
1298 return -1;
1299 }
1300
1301 if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
1302 psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
1303 KM_SLEEP);
1304
1305 error = virtio_pci_setup_msix_interrupts(sc, &psc->sc_pa);
1306 if (error != 0) {
1307 kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
1308 pci_intr_release(pc, psc->sc_ihp, nmsix);
1309
1310 /* Retry INTx */
1311 max_type = PCI_INTR_TYPE_INTX;
1312 counts[PCI_INTR_TYPE_INTX] = 1;
1313 goto retry;
1314 }
1315
1316 psc->sc_ihs_num = nmsix;
1317 psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
1318 virtio_pci_adjust_config_region(psc);
1319 } else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
1320 psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
1321 KM_SLEEP);
1322
1323 error = virtio_pci_setup_intx_interrupt(sc, &psc->sc_pa);
1324 if (error != 0) {
1325 kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
1326 pci_intr_release(pc, psc->sc_ihp, 1);
1327 return -1;
1328 }
1329
1330 psc->sc_ihs_num = 1;
1331 psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1332 virtio_pci_adjust_config_region(psc);
1333
1334 error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
1335 if (error != 0) {
1336 ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
1337 ctl &= ~PCI_MSIX_CTL_ENABLE;
1338 pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
1339 }
1340 }
1341
1342 return 0;
1343 }
1344
1345 static void
1346 virtio_pci_free_interrupts(struct virtio_softc *sc)
1347 {
1348 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1349
1350 for (int i = 0; i < psc->sc_ihs_num; i++) {
1351 if (psc->sc_ihs[i] == NULL)
1352 continue;
1353 pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
1354 psc->sc_ihs[i] = NULL;
1355 }
1356
1357 if (psc->sc_ihs_num > 0)
1358 pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);
1359
1360 if (psc->sc_ihs != NULL) {
1361 kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
1362 psc->sc_ihs = NULL;
1363 }
1364 psc->sc_ihs_num = 0;
1365 }
1366
1367 /*
1368 * Interrupt handler.
1369 */
/*
 * INTx interrupt handler (shared between config-change and queue
 * notifications).  Reading the ISR register both reports and
 * acknowledges the interrupt, so the read must happen exactly once.
 * Returns nonzero if the interrupt was handled.
 */
static int
virtio_pci_intr(void *arg)
{
	struct virtio_softc *sc = arg;
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
	if (isr == 0)
		return 0;	/* not ours (shared INTx line) */
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL))
		r = (sc->sc_config_change)(sc);
	if (sc->sc_intrhand != NULL) {
		/* Defer to softint when configured, else call directly. */
		if (sc->sc_soft_ih != NULL)
			softint_schedule(sc->sc_soft_ih);
		else
			r |= (sc->sc_intrhand)(sc);
	}

	return r;
}
1393
1394 static int
1395 virtio_pci_msix_queue_intr(void *arg)
1396 {
1397 struct virtio_softc *sc = arg;
1398 int r = 0;
1399
1400 if (sc->sc_intrhand != NULL) {
1401 if (sc->sc_soft_ih != NULL)
1402 softint_schedule(sc->sc_soft_ih);
1403 else
1404 r |= (sc->sc_intrhand)(sc);
1405 }
1406
1407 return r;
1408 }
1409
1410 static int
1411 virtio_pci_msix_config_intr(void *arg)
1412 {
1413 struct virtio_softc *sc = arg;
1414 int r = 0;
1415
1416 if (sc->sc_config_change != NULL)
1417 r = (sc->sc_config_change)(sc);
1418 return r;
1419 }
1420
1421 MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");
1422
1423 #ifdef _MODULE
1424 #include "ioconf.c"
1425 #endif
1426
1427 static int
1428 virtio_pci_modcmd(modcmd_t cmd, void *opaque)
1429 {
1430 int error = 0;
1431
1432 #ifdef _MODULE
1433 switch (cmd) {
1434 case MODULE_CMD_INIT:
1435 error = config_init_component(cfdriver_ioconf_virtio_pci,
1436 cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1437 break;
1438 case MODULE_CMD_FINI:
1439 error = config_fini_component(cfdriver_ioconf_virtio_pci,
1440 cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1441 break;
1442 default:
1443 error = ENOTTY;
1444 break;
1445 }
1446 #endif
1447
1448 return error;
1449 }
1450