virtio_pci.c revision 1.16 1 /* $NetBSD: virtio_pci.c,v 1.16 2021/01/20 21:59:48 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2020 The NetBSD Foundation, Inc.
5 * Copyright (c) 2012 Stefan Fritsch.
6 * Copyright (c) 2010 Minoura Makoto.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.16 2021/01/20 21:59:48 reinoud Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kmem.h>
36 #include <sys/module.h>
37 #include <sys/interrupt.h>
38
39 #include <sys/device.h>
40
41 #include <dev/pci/pcidevs.h>
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
44
45 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
46 #include <dev/pci/virtio_pcireg.h>
47
48 #define VIRTIO_PRIVATE
49 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
50
51
52 static int virtio_pci_match(device_t, cfdata_t, void *);
53 static void virtio_pci_attach(device_t, device_t, void *);
54 static int virtio_pci_rescan(device_t, const char *, const int *);
55 static int virtio_pci_detach(device_t, int);
56
struct virtio_pci_softc {
	/*
	 * MUST be the first member: the transport code freely casts
	 * struct virtio_softc * to struct virtio_pci_softc * and back.
	 */
	struct virtio_softc sc_sc;

	/* IO space (0.9: the whole legacy BAR; 1.0: the common-cfg subregion) */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_size_t sc_iosize;
	bus_size_t sc_mapped_iosize;	/* size of the underlying mapping, for unmap */

	/* BARs mapped for the 1.0 capability layout (indexed by map order) */
	bus_space_tag_t sc_bars_iot[4];
	bus_space_handle_t sc_bars_ioh[4];
	bus_size_t sc_bars_iosize[4];

	/* notify space (where queue kicks are written) */
	bus_space_tag_t sc_notify_iot;
	bus_space_handle_t sc_notify_ioh;
	bus_size_t sc_notify_iosize;
	uint32_t sc_notify_off_multiplier;	/* 1.0: per-queue notify stride */

	/* isr space (read-to-ack interrupt status byte) */
	bus_space_tag_t sc_isr_iot;
	bus_space_handle_t sc_isr_ioh;
	bus_size_t sc_isr_iosize;

	/* generic */
	struct pci_attach_args sc_pa;	/* saved copy for deferred cap lookups */
	pci_intr_handle_t *sc_ihp;
	void **sc_ihs;
	int sc_ihs_num;			/* 0 = none, 1 = INTx/single, >1 = MSI-X */
	int sc_devcfg_offset;	/* for 0.9: start of device config in the BAR */
};
89
90 static int virtio_pci_attach_09(device_t, void *);
91 static void virtio_pci_kick_09(struct virtio_softc *, uint16_t);
92 static uint16_t virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
93 static void virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
94 static void virtio_pci_set_status_09(struct virtio_softc *, int);
95 static void virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);
96
97 static int virtio_pci_attach_10(device_t, void *);
98 static void virtio_pci_kick_10(struct virtio_softc *, uint16_t);
99 static uint16_t virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
100 static void virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
101 static void virtio_pci_set_status_10(struct virtio_softc *, int);
102 static void virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
103 static int virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);
104
105 static uint8_t virtio_pci_read_device_config_1(struct virtio_softc *, int);
106 static uint16_t virtio_pci_read_device_config_2(struct virtio_softc *, int);
107 static uint32_t virtio_pci_read_device_config_4(struct virtio_softc *, int);
108 static uint64_t virtio_pci_read_device_config_8(struct virtio_softc *, int);
109 static void virtio_pci_write_device_config_1(struct virtio_softc *, int, uint8_t);
110 static void virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
111 static void virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
112 static void virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
113
114 static int virtio_pci_setup_interrupts(struct virtio_softc *);
115 static void virtio_pci_free_interrupts(struct virtio_softc *);
116 static int virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
117 static int virtio_pci_intr(void *arg);
118 static int virtio_pci_msix_queue_intr(void *);
119 static int virtio_pci_msix_config_intr(void *);
120 static int virtio_pci_setup_msix_vectors_09(struct virtio_softc *);
121 static int virtio_pci_setup_msix_vectors_10(struct virtio_softc *);
122 static int virtio_pci_setup_msix_interrupts(struct virtio_softc *,
123 struct pci_attach_args *);
124 static int virtio_pci_setup_intx_interrupt(struct virtio_softc *,
125 struct pci_attach_args *);
126
/*
 * Fixed MSI-X vector layout: vector 0 carries config-change interrupts,
 * vectors starting at 1 carry queue interrupts (one per queue when the
 * child requests multi-queue, otherwise shared).
 */
#define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
#define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1

#if 0
/*
 * Disabled legacy byte-order helpers, kept for reference only: they
 * provided stream-access fallbacks for the 0.9 transport on platforms
 * without __BUS_SPACE_HAS_STREAM_METHODS.
 */
/* we use the legacy virtio spec, so the PCI registers are host native
 * byte order, not PCI (i.e. LE) byte order */
#if BYTE_ORDER == BIG_ENDIAN
#define REG_HI_OFF	0
#define REG_LO_OFF	4
#ifndef __BUS_SPACE_HAS_STREAM_METHODS
#define bus_space_read_stream_1 bus_space_read_1
#define bus_space_write_stream_1 bus_space_write_1
static inline uint16_t
bus_space_read_stream_2(bus_space_tag_t t, bus_space_handle_t h,
    bus_size_t o)
{
	return le16toh(bus_space_read_2(t, h, o));
}
static inline void
bus_space_write_stream_2(bus_space_tag_t t, bus_space_handle_t h,
    bus_size_t o, uint16_t v)
{
	bus_space_write_2(t, h, o, htole16(v));
}
static inline uint32_t
bus_space_read_stream_4(bus_space_tag_t t, bus_space_handle_t h,
    bus_size_t o)
{
	return le32toh(bus_space_read_4(t, h, o));
}
static inline void
bus_space_write_stream_4(bus_space_tag_t t, bus_space_handle_t h,
    bus_size_t o, uint32_t v)
{
	bus_space_write_4(t, h, o, htole32(v));
}
#endif
#else
#define REG_HI_OFF	4
#define REG_LO_OFF	0
#ifndef __BUS_SPACE_HAS_STREAM_METHODS
#define bus_space_read_stream_1 bus_space_read_1
#define bus_space_read_stream_2 bus_space_read_2
#define bus_space_read_stream_4 bus_space_read_4
#define bus_space_write_stream_1 bus_space_write_1
#define bus_space_write_stream_2 bus_space_write_2
#define bus_space_write_stream_4 bus_space_write_4
#endif
#endif
#endif


/*
 * Whether device-config accesses must be byte swapped on this host:
 * the 0.9 config space is host-endian, the 1.0 config space is
 * little-endian, so only big-endian hosts swap for 1.0 devices.
 */
#if BYTE_ORDER == LITTLE_ENDIAN
#	define VIODEVRW_SWAP_09		false
#	define VIODEVRW_SWAP_10		false
#else /* big endian */
#	define VIODEVRW_SWAP_09		false
#	define VIODEVRW_SWAP_10		true
#endif
186
187
CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
    virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
    virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);

/* Transport operations for legacy (spec 0.9) devices. */
static const struct virtio_ops virtio_pci_ops_09 = {
	.kick = virtio_pci_kick_09,

	/* device-config accessors are shared; endianness is handled via
	 * sc_devcfg_swap set at attach time */
	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
	.read_dev_cfg_8 = virtio_pci_read_device_config_8,
	.write_dev_cfg_1 = virtio_pci_write_device_config_1,
	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
	.write_dev_cfg_8 = virtio_pci_write_device_config_8,

	.read_queue_size = virtio_pci_read_queue_size_09,
	.setup_queue = virtio_pci_setup_queue_09,
	.set_status = virtio_pci_set_status_09,
	.neg_features = virtio_pci_negotiate_features_09,
	.setup_interrupts = virtio_pci_setup_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
};

/* Transport operations for modern (spec 1.0) devices. */
static const struct virtio_ops virtio_pci_ops_10 = {
	.kick = virtio_pci_kick_10,

	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
	.read_dev_cfg_8 = virtio_pci_read_device_config_8,
	.write_dev_cfg_1 = virtio_pci_write_device_config_1,
	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
	.write_dev_cfg_8 = virtio_pci_write_device_config_8,

	.read_queue_size = virtio_pci_read_queue_size_10,
	.setup_queue = virtio_pci_setup_queue_10,
	.set_status = virtio_pci_set_status_10,
	.neg_features = virtio_pci_negotiate_features_10,
	.setup_interrupts = virtio_pci_setup_interrupts,
	.free_interrupts = virtio_pci_free_interrupts,
};
231
232 static int
233 virtio_pci_match(device_t parent, cfdata_t match, void *aux)
234 {
235 struct pci_attach_args *pa;
236
237 pa = (struct pci_attach_args *)aux;
238 switch (PCI_VENDOR(pa->pa_id)) {
239 case PCI_VENDOR_QUMRANET:
240 if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
241 PCI_PRODUCT(pa->pa_id)) &&
242 (PCI_PRODUCT(pa->pa_id) <=
243 PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
244 PCI_REVISION(pa->pa_class) == 0)
245 return 1;
246 if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
247 PCI_PRODUCT(pa->pa_id)) &&
248 (PCI_PRODUCT(pa->pa_id) <=
249 PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
250 PCI_REVISION(pa->pa_class) == 1)
251 return 1;
252 break;
253 }
254
255 return 0;
256 }
257
258 static void
259 virtio_pci_attach(device_t parent, device_t self, void *aux)
260 {
261 struct virtio_pci_softc * const psc = device_private(self);
262 struct virtio_softc * const sc = &psc->sc_sc;
263 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
264 pci_chipset_tag_t pc = pa->pa_pc;
265 pcitag_t tag = pa->pa_tag;
266 int revision;
267 int ret;
268 pcireg_t id;
269 pcireg_t csr;
270
271 revision = PCI_REVISION(pa->pa_class);
272 switch (revision) {
273 case 0:
274 /* subsystem ID shows what I am */
275 id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
276 break;
277 case 1:
278 /* pci product number shows what I am */
279 id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
280 break;
281 default:
282 aprint_normal(": unknown revision 0x%02x; giving up\n",
283 revision);
284 return;
285 }
286
287 aprint_normal("\n");
288 aprint_naive("\n");
289 virtio_print_device_type(self, id, revision);
290
291 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
292 csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
293 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
294
295 sc->sc_dev = self;
296 psc->sc_pa = *pa;
297 psc->sc_iot = pa->pa_iot;
298
299 sc->sc_dmat = pa->pa_dmat;
300 if (pci_dma64_available(pa))
301 sc->sc_dmat = pa->pa_dmat64;
302
303 /* attach is dependent on revision */
304 ret = 0;
305 if (revision == 1) {
306 /* try to attach 1.0 */
307 ret = virtio_pci_attach_10(self, aux);
308 }
309 if (ret == 0 && revision == 0) {
310 /* revision 0 means 0.9 only or both 0.9 and 1.0 */
311 ret = virtio_pci_attach_09(self, aux);
312 }
313 if (ret) {
314 aprint_error_dev(self, "cannot attach (%d)\n", ret);
315 return;
316 }
317 KASSERT(sc->sc_ops);
318
319 /* preset config region */
320 psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
321 if (virtio_pci_adjust_config_region(psc))
322 return;
323
324 /* generic */
325 virtio_device_reset(sc);
326 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
327 virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
328
329 sc->sc_childdevid = id;
330 sc->sc_child = NULL;
331 virtio_pci_rescan(self, "virtio", 0);
332 return;
333 }
334
335 /* ARGSUSED */
336 static int
337 virtio_pci_rescan(device_t self, const char *attr, const int *scan_flags)
338 {
339 struct virtio_pci_softc * const psc = device_private(self);
340 struct virtio_softc * const sc = &psc->sc_sc;
341 struct virtio_attach_args va;
342
343 if (sc->sc_child) /* Child already attached? */
344 return 0;
345
346 memset(&va, 0, sizeof(va));
347 va.sc_childdevid = sc->sc_childdevid;
348
349 config_found_ia(self, attr, &va, NULL);
350
351 if (virtio_attach_failed(sc))
352 return 0;
353
354 return 0;
355 }
356
357
358 static int
359 virtio_pci_detach(device_t self, int flags)
360 {
361 struct virtio_pci_softc * const psc = device_private(self);
362 struct virtio_softc * const sc = &psc->sc_sc;
363 int r;
364
365 if (sc->sc_child != NULL) {
366 r = config_detach(sc->sc_child, flags);
367 if (r)
368 return r;
369 }
370
371 /* Check that child detached properly */
372 KASSERT(sc->sc_child == NULL);
373 KASSERT(sc->sc_vqs == NULL);
374 KASSERT(psc->sc_ihs_num == 0);
375
376 if (psc->sc_iosize)
377 bus_space_unmap(psc->sc_iot, psc->sc_ioh,
378 psc->sc_mapped_iosize);
379 psc->sc_iosize = 0;
380
381 return 0;
382 }
383
384
/*
 * Legacy (virtio 0.9) attach: the device is a single I/O BAR laid out
 * as the legacy virtio header.  Map it whole, carve out the notify and
 * ISR subregions, and install the 0.9 ops vector.  Returns 0 or EIO.
 */
static int
virtio_pci_attach_09(device_t self, void *aux)
//struct virtio_pci_softc *psc, struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
//	pci_chipset_tag_t pc = pa->pa_pc;
//	pcitag_t tag = pa->pa_tag;

	/* complete IO region */
	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
	    &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return EIO;
	}
	/* one mapping only, so mapped size equals the region size */
	psc->sc_mapped_iosize = psc->sc_iosize;

	/* queue space (2-byte notify register inside the legacy header) */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		return EIO;
	}
	psc->sc_notify_iosize = 2;
	psc->sc_notify_iot = psc->sc_iot;

	/* ISR space (single read-to-ack status byte) */
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		return EIO;
	}
	psc->sc_isr_iosize = 1;
	psc->sc_isr_iot = psc->sc_iot;

	/* set our version 0.9 ops */
	sc->sc_ops = &virtio_pci_ops_09;
	sc->sc_devcfg_swap = VIODEVRW_SWAP_09;
	return 0;
}
426
427
/* number of possible 32-bit BAR registers */
#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
				sizeof(pcireg_t))

/*
 * Modern (virtio 1.0) attach: locate the common/notify/isr/device
 * vendor capabilities, map every BAR any of them lives in, then carve
 * the per-capability subregions out of those mappings and install the
 * 1.0 ops vector.  Returns 0, ENODEV (mandatory cap missing) or EIO.
 */
static int
virtio_pci_attach_10(device_t self, void *aux)
{
	struct virtio_pci_softc * const psc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct virtio_softc * const sc = &psc->sc_sc;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;

	struct virtio_pci_cap common, isr, device;
	struct virtio_pci_notify_cap notify;
	int have_device_cfg = 0;
	bus_size_t bars[NMAPREG] = { 0 };
	int bars_idx[NMAPREG] = { 0 };
	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
	int i, j = 0, ret = 0;

	/* common, notify and isr caps are mandatory; device cap is not */
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
	    &common, sizeof(common)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
	    &notify, sizeof(notify)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
	    &isr, sizeof(isr)))
		return ENODEV;
	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
	    &device, sizeof(device)))
		memset(&device, 0, sizeof(device));
	else
		have_device_cfg = 1;

	/*
	 * XXX Maybe there are devices that offer the pci caps but not the
	 * XXX VERSION_1 feature bit?  Then we should check the feature bit
	 * XXX here and fall back to 0.9 out if not present.
	 */

	/* Figure out which bars we need to map (max extent per BAR) */
	for (i = 0; i < __arraycount(caps); i++) {
		int bar = caps[i]->bar;
		bus_size_t len = caps[i]->offset + caps[i]->length;
		if (caps[i]->length == 0)
			continue;
		if (bars[bar] < len)
			bars[bar] = len;
	}

	/* map each referenced BAR once; bars_idx maps BAR no -> slot j */
	for (i = 0; i < __arraycount(bars); i++) {
		int reg;
		pcireg_t type;
		if (bars[i] == 0)
			continue;
		reg = PCI_MAPREG_START + i * 4;
		type = pci_mapreg_type(pc, tag, reg);
		if (pci_mapreg_map(pa, reg, type, 0,
		    &psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
		    NULL, &psc->sc_bars_iosize[j])) {
			aprint_error_dev(self, "can't map bar %u \n", i);
			ret = EIO;
			goto err;
		}
		/*
		 * NOTE(review): %lx assumes bus_size_t is unsigned long;
		 * confirm on ports where that does not hold.
		 */
		aprint_debug_dev(self, "bar[%d]: iot %p, size 0x%lx\n",
		    j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
		bars_idx[i] = j;
		j++;
	}

	/* notify region: queue kicks land here (offset scaled per queue) */
	i = bars_idx[notify.cap.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    notify.cap.offset, notify.cap.length,
	    &psc->sc_notify_ioh)) {
		aprint_error_dev(self, "can't map notify i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_notify_iosize = notify.cap.length;
	psc->sc_notify_iot = psc->sc_bars_iot[i];
	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);

	/* optional device-specific config region */
	if (have_device_cfg) {
		i = bars_idx[device.bar];
		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
		    device.offset, device.length,
		    &sc->sc_devcfg_ioh)) {
			aprint_error_dev(self, "can't map devcfg i/o space\n");
			ret = EIO;
			goto err;
		}
		aprint_debug_dev(self,
		    "device.offset = 0x%x, device.length = 0x%x\n",
		    device.offset, device.length);
		sc->sc_devcfg_iosize = device.length;
		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
	}

	/* ISR status byte */
	i = bars_idx[isr.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    isr.offset, isr.length, &psc->sc_isr_ioh)) {
		aprint_error_dev(self, "can't map isr i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_isr_iosize = isr.length;
	psc->sc_isr_iot = psc->sc_bars_iot[i];

	/* common config: becomes the "main" region (sc_ioh/sc_iot) */
	i = bars_idx[common.bar];
	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
	    common.offset, common.length, &psc->sc_ioh)) {
		aprint_error_dev(self, "can't map common i/o space\n");
		ret = EIO;
		goto err;
	}
	psc->sc_iosize = common.length;
	psc->sc_iot = psc->sc_bars_iot[i];
	/* remember the full BAR size so detach can unmap the mapping */
	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];

	psc->sc_sc.sc_version_1 = 1;

	/* set our version 1.0 ops */
	sc->sc_ops = &virtio_pci_ops_10;
	sc->sc_devcfg_swap = VIODEVRW_SWAP_10;
	return 0;

err:
	/* there is no pci_mapreg_unmap() */
	return ret;
}
558
/* v1.0 attach helper */
/*
 * Scan the PCI capability list for a vendor-specific virtio capability
 * with the given cfg_type and copy it -- header plus any payload beyond
 * the generic virtio_pci_cap -- into buf, fixing up the little-endian
 * offset/length fields.  Returns 0 on success, ENOENT if no matching
 * capability exists, ERANGE if buf is too small.
 */
static int
virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
{
	device_t self = psc->sc_sc.sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	unsigned int offset, i, len;
	union {
		pcireg_t reg[8];
		struct virtio_pci_cap vcap;
	} *v = buf;	/* caller's buffer doubles as the read window */

	if (buflen < sizeof(struct virtio_pci_cap))
		return ERANGE;

	/* start at the first vendor-specific capability */
	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
		return ENOENT;

	do {
		/* read the 16-byte generic capability header */
		for (i = 0; i < 4; i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
		if (v->vcap.cfg_type == cfg_type)
			break;
		/*
		 * NOTE(review): cap_next chains through ALL capabilities,
		 * not just vendor-specific ones, so cfg_type is compared
		 * against non-virtio caps too -- presumably harmless, but
		 * worth confirming against the virtio 1.0 spec.
		 */
		offset = v->vcap.cap_next;
	} while (offset != 0);

	if (offset == 0)
		return ENOENT;

	/* capability bigger than the generic header: fetch the remainder */
	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
		if (len > buflen) {
			aprint_error_dev(self, "%s cap too large\n", __func__);
			return ERANGE;
		}
		for (i = 4; i < len / sizeof(pcireg_t); i++)
			v->reg[i] =
			    le32toh(pci_conf_read(pc, tag, offset + i * 4));
	}

	/* endian fixup */
	v->vcap.offset = le32toh(v->vcap.offset);
	v->vcap.length = le32toh(v->vcap.length);
	return 0;
}
606
607
608 /* -------------------------------------
609 * Version 0.9 support
610 * -------------------------------------*/
611
612 static void
613 virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
614 {
615 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
616
617 bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
618 }
619
620 /* only applicable for v 0.9 but also called for 1.0 */
621 static int
622 virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
623 {
624 struct virtio_softc * const sc = (struct virtio_softc *) psc;
625 device_t self = psc->sc_sc.sc_dev;
626
627 if (psc->sc_sc.sc_version_1)
628 return 0;
629
630 sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
631 sc->sc_devcfg_iot = psc->sc_iot;
632 if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
633 psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
634 &sc->sc_devcfg_ioh)) {
635 aprint_error_dev(self, "can't map config i/o space\n");
636 return EIO;
637 }
638
639 return 0;
640 }
641
642 static uint16_t
643 virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
644 {
645 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
646
647 bus_space_write_2(psc->sc_iot, psc->sc_ioh,
648 VIRTIO_CONFIG_QUEUE_SELECT, idx);
649 return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
650 VIRTIO_CONFIG_QUEUE_SIZE);
651 }
652
653 static void
654 virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
655 {
656 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
657
658 bus_space_write_2(psc->sc_iot, psc->sc_ioh,
659 VIRTIO_CONFIG_QUEUE_SELECT, idx);
660 bus_space_write_4(psc->sc_iot, psc->sc_ioh,
661 VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
662
663 if (psc->sc_ihs_num > 1) {
664 int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
665 if (sc->sc_child_mq)
666 vec += idx;
667 bus_space_write_2(psc->sc_iot, psc->sc_ioh,
668 VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
669 }
670 }
671
672 static void
673 virtio_pci_set_status_09(struct virtio_softc *sc, int status)
674 {
675 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
676 int old = 0;
677
678 if (status != 0) {
679 old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
680 VIRTIO_CONFIG_DEVICE_STATUS);
681 }
682 bus_space_write_1(psc->sc_iot, psc->sc_ioh,
683 VIRTIO_CONFIG_DEVICE_STATUS, status|old);
684 }
685
686 static void
687 virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
688 {
689 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
690 uint32_t r;
691
692 r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
693 VIRTIO_CONFIG_DEVICE_FEATURES);
694
695 r &= guest_features;
696
697 bus_space_write_4(psc->sc_iot, psc->sc_ioh,
698 VIRTIO_CONFIG_GUEST_FEATURES, r);
699
700 sc->sc_active_features = r;
701 }
702
703 /* -------------------------------------
704 * Version 1.0 support
705 * -------------------------------------*/
706
707 static void
708 virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
709 {
710 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
711 unsigned offset = sc->sc_vqs[idx].vq_notify_off *
712 psc->sc_notify_off_multiplier;
713
714 bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
715 }
716
717
718 static uint16_t
719 virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
720 {
721 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
722 bus_space_tag_t iot = psc->sc_iot;
723 bus_space_handle_t ioh = psc->sc_ioh;
724
725 bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
726 return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
727 }
728
/*
 * Modern queue setup: select the queue, program (or clear, for addr 0)
 * the desc/avail/used ring addresses, enable it, latch its notify
 * offset, and route its MSI-X vector if MSI-X is in use.  The register
 * write order follows the common-cfg protocol: select first, enable
 * last on setup / first on teardown.
 */
static void
virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	struct virtqueue *vq = &sc->sc_vqs[idx];
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	KASSERT(vq->vq_index == idx);

	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
	if (addr == 0) {
		/* addr 0 tears the queue down: disable, then clear rings */
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_DESC, 0);
		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_AVAIL, 0);
		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_USED, 0);
	} else {
		/*
		 * All three rings share one DMA area starting at addr;
		 * the avail/used offsets were precomputed into the vq.
		 */
		bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_DESC, addr);
		bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
		bus_space_write_8(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
		/* remember where kicks for this queue must be written */
		vq->vq_notify_off = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
	}

	if (psc->sc_ihs_num > 1) {
		/* MSI-X: per-queue vector when the child asked for MQ */
		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		if (sc->sc_child_mq)
			vec += idx;
		bus_space_write_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
	}
}
765
766 static void
767 virtio_pci_set_status_10(struct virtio_softc *sc, int status)
768 {
769 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
770 bus_space_tag_t iot = psc->sc_iot;
771 bus_space_handle_t ioh = psc->sc_ioh;
772 int old = 0;
773
774 if (status)
775 old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
776 bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
777 }
778
779 void
780 virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
781 {
782 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
783 device_t self = sc->sc_dev;
784 bus_space_tag_t iot = psc->sc_iot;
785 bus_space_handle_t ioh = psc->sc_ioh;
786 uint64_t host, negotiated, device_status;
787
788 guest_features |= VIRTIO_F_VERSION_1;
789 /* notify on empty is 0.9 only */
790 guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
791 sc->sc_active_features = 0;
792
793 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
794 host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
795 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
796 host |= (uint64_t)
797 bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
798
799 negotiated = host & guest_features;
800
801 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
802 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
803 negotiated & 0xffffffff);
804 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
805 bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
806 negotiated >> 32);
807 virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
808
809 device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
810 if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
811 aprint_error_dev(self, "feature negotiation failed\n");
812 bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
813 VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
814 return;
815 }
816
817 if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
818 aprint_error_dev(self, "host rejected version 1\n");
819 bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
820 VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
821 return;
822 }
823
824 sc->sc_active_features = negotiated;
825 return;
826 }
827
828 /* -------------------------------------
829 * Read/write device config code
830 * -------------------------------------*/
831
832 static uint8_t
833 virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
834 {
835 bus_space_tag_t iot = vsc->sc_devcfg_iot;
836 bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
837
838 return bus_space_read_1(iot, ioh, index);
839 }
840
/*
 * Read a 16-bit device-config field, honouring the transport's
 * endianness (0.9: host order, 1.0: little-endian) via sc_devcfg_swap.
 */
static uint16_t
virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint16_t val;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/*
	 * NOTE(review): plain (non-stream) read used here; presumably the
	 * aarch64-eb bus_space already yields the right byte order --
	 * confirm against the MD bus_space implementation.
	 */
	val = bus_space_read_2(iot, ioh, index);
	return val;
#else
	/* stream read keeps bus byte order; swap when the transport asks */
	val = bus_space_read_stream_2(iot, ioh, index);
	if (vsc->sc_devcfg_swap)
		return bswap16(val);
	return val;
#endif
}
858
/*
 * Read a 32-bit device-config field, honouring the transport's
 * endianness (0.9: host order, 1.0: little-endian) via sc_devcfg_swap.
 */
static uint32_t
virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint32_t val;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* see the NOTE in virtio_pci_read_device_config_2 re plain reads */
	val = bus_space_read_4(iot, ioh, index);
	return val;
#else
	/* stream read keeps bus byte order; swap when the transport asks */
	val = bus_space_read_stream_4(iot, ioh, index);
	if (vsc->sc_devcfg_swap)
		return bswap32(val);
	return val;
#endif
}
876
/*
 * Read a 64-bit device-config field as two 32-bit halves, combining
 * them according to host endianness and the transport's byte order
 * (sc_devcfg_swap).  There is no atomicity guarantee across the two
 * reads; callers relying on consistency re-read via generation counts.
 */
static uint64_t
virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint64_t val, val_h, val_l;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* aarch64-eb: half order depends on whether the field is LE (1.0) */
	if (vsc->sc_devcfg_swap) {
		val_l = bus_space_read_4(iot, ioh, index);
		val_h = bus_space_read_4(iot, ioh, index + 4);
	} else {
		val_h = bus_space_read_4(iot, ioh, index);
		val_l = bus_space_read_4(iot, ioh, index + 4);
	}
	val = val_h << 32;
	val |= val_l;
	return val;
#elif BYTE_ORDER == BIG_ENDIAN
	/* generic big-endian: assemble in bus order, then swap if needed */
	val_h = bus_space_read_stream_4(iot, ioh, index);
	val_l = bus_space_read_stream_4(iot, ioh, index + 4);
	val = val_h << 32;
	val |= val_l;
	if (vsc->sc_devcfg_swap)
		return bswap64(val);
	return val;
#else
	/* little-endian host: low half first, no swapping needed */
	val_l = bus_space_read_4(iot, ioh, index);
	val_h = bus_space_read_4(iot, ioh, index + 4);
	val = val_h << 32;
	val |= val_l;

	return val;
#endif
}
912
913 static void
914 virtio_pci_write_device_config_1(struct virtio_softc *vsc,
915 int index, uint8_t value)
916 {
917 bus_space_tag_t iot = vsc->sc_devcfg_iot;
918 bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
919
920 bus_space_write_1(iot, ioh, index, value);
921 }
922
/*
 * Write a 16-bit device-config field, honouring the transport's
 * endianness (0.9: host order, 1.0: little-endian) via sc_devcfg_swap.
 */
static void
virtio_pci_write_device_config_2(struct virtio_softc *vsc,
    int index, uint16_t value)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* see the NOTE in virtio_pci_read_device_config_2 re plain access */
	bus_space_write_2(iot, ioh, index, value);
#else
	/* swap into bus byte order if required, then stream-write */
	if (vsc->sc_devcfg_swap)
		value = bswap16(value);
	bus_space_write_stream_2(iot, ioh, index, value);
#endif
}
938
/*
 * Write a 32-bit device-config field, honouring the transport's
 * endianness (0.9: host order, 1.0: little-endian) via sc_devcfg_swap.
 */
static void
virtio_pci_write_device_config_4(struct virtio_softc *vsc,
    int index, uint32_t value)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* see the NOTE in virtio_pci_read_device_config_2 re plain access */
	bus_space_write_4(iot, ioh, index, value);
#else
	/* swap into bus byte order if required, then stream-write */
	if (vsc->sc_devcfg_swap)
		value = bswap32(value);
	bus_space_write_stream_4(iot, ioh, index, value);
#endif
}
954
/*
 * Write a 64-bit device-config field as two 32-bit halves, ordered
 * according to host endianness and the transport's byte order
 * (sc_devcfg_swap).  Mirrors virtio_pci_read_device_config_8.
 */
static void
virtio_pci_write_device_config_8(struct virtio_softc *vsc,
    int index, uint64_t value)
{
	bus_space_tag_t iot = vsc->sc_devcfg_iot;
	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
	uint64_t val_h, val_l;

#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
	/* aarch64-eb: half order depends on whether the field is LE (1.0) */
	val_l = value & 0xffffffff;
	val_h = value >> 32;
	if (vsc->sc_devcfg_swap) {
		bus_space_write_4(iot, ioh, index, val_l);
		bus_space_write_4(iot, ioh, index + 4, val_h);
	} else {
		bus_space_write_4(iot, ioh, index, val_h);
		bus_space_write_4(iot, ioh, index + 4, val_l);
	}
#elif BYTE_ORDER == BIG_ENDIAN
	/* generic big-endian: swap the whole value first if required */
	if (vsc->sc_devcfg_swap)
		value = bswap64(value);
	val_l = value & 0xffffffff;
	val_h = value >> 32;

	bus_space_write_stream_4(iot, ioh, index, val_h);
	bus_space_write_stream_4(iot, ioh, index + 4, val_l);
#else
	/* little-endian host: low half first, no swapping needed */
	val_l = value & 0xffffffff;
	val_h = value >> 32;
	bus_space_write_stream_4(iot, ioh, index, val_l);
	bus_space_write_stream_4(iot, ioh, index + 4, val_h);
#endif
}
988
989 /* -------------------------------------
990 * Generic PCI interrupt code
991 * -------------------------------------*/
992
/*
 * Program MSI-X vector indices into a virtio 1.0 (modern) device's
 * common configuration structure.
 *
 * The config-change vector and one vector per virtqueue are written;
 * each write is read back to verify the device accepted it (a device
 * that cannot use the vector reports a different value on read-back).
 *
 * Returns 0 on success, -1 if the device rejected any vector.
 */
static int
virtio_pci_setup_msix_vectors_10(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	bus_space_tag_t iot = psc->sc_iot;
	bus_space_handle_t ioh = psc->sc_ioh;
	int vector, ret, qid;

	/* Vector for configuration-change notifications. */
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	bus_space_write_2(iot, ioh,
	    VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
	if (ret != vector) {
		aprint_error_dev(self, "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		/*
		 * With a multiqueue-aware child each queue gets its own
		 * vector; otherwise all queues share one.
		 */
		if (sc->sc_child_mq)
			vector += qid;
		/* Select the queue, then set and verify its vector. */
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
		    vector);
		ret = bus_space_read_2(iot, ioh,
		    VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
		if (ret != vector) {
			aprint_error_dev(self, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
1030
/*
 * Program MSI-X vector indices into a legacy (virtio 0.9) device
 * through the legacy I/O configuration registers.
 *
 * Mirrors virtio_pci_setup_msix_vectors_10(): write each vector and
 * read it back to confirm the device accepted it.
 *
 * Returns 0 on success, -1 if the device rejected any vector.
 */
static int
virtio_pci_setup_msix_vectors_09(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	int offset, vector, ret, qid;

	/* Vector for configuration-change notifications. */
	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;

	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
	    vector, ret);
	if (ret != vector) {
		aprint_error_dev(self, "can't set config msix vector\n");
		return -1;
	}

	for (qid = 0; qid < sc->sc_nvqs; qid++) {
		/* Select the queue whose vector we are about to set. */
		offset = VIRTIO_CONFIG_QUEUE_SELECT;
		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);

		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;

		/* Per-queue vectors only when the child is multiqueue. */
		if (sc->sc_child_mq)
			vector += qid;

		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
		    vector, ret);
		if (ret != vector) {
			aprint_error_dev(self, "can't set queue %d "
			    "msix vector\n", qid);
			return -1;
		}
	}

	return 0;
}
1073
/*
 * Establish MSI-X interrupt handlers and program the device's MSI-X
 * vectors.
 *
 * Vector layout: VIRTIO_MSIX_CONFIG_VECTOR_INDEX handles config
 * changes; queue interrupts start at VIRTIO_MSIX_QUEUE_VECTOR_INDEX,
 * either one shared vector for all queues or (with sc_child_mq) one
 * vector per virtqueue.
 *
 * Returns 0 on success.  On failure returns -1 after disestablishing
 * any handlers already set up; the caller releases the interrupt
 * sources themselves.
 */
static int
virtio_pci_setup_msix_interrupts(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	struct virtqueue *vq;
	char intrbuf[PCI_INTRSTR_LEN];
	char intr_xname[INTRDEVNAMEBUF];
	char const *intrstr;
	int idx, qid, n;
	int ret;

	/* Config-change vector. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

	snprintf(intr_xname, sizeof(intr_xname), "%s config",
	    device_xname(sc->sc_dev));

	psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
	    sc->sc_ipl, virtio_pci_msix_config_intr, sc, intr_xname);
	if (psc->sc_ihs[idx] == NULL) {
		aprint_error_dev(self, "couldn't establish MSI-X for config\n");
		goto error;
	}

	/* Queue vector(s): per-queue handlers or one shared handler. */
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			vq = &sc->sc_vqs[qid];

			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
			    device_xname(sc->sc_dev), qid);

			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
				pci_intr_setattr(pc, &psc->sc_ihp[n],
				    PCI_INTR_MPSAFE, true);
			}

			/* The child driver supplied per-vq handlers. */
			psc->sc_ihs[n] = pci_intr_establish_xname(pc, psc->sc_ihp[n],
			    sc->sc_ipl, vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname);
			if (psc->sc_ihs[n] == NULL) {
				aprint_error_dev(self, "couldn't establish MSI-X for a vq\n");
				goto error;
			}
		}
	} else {
		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);

		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
		    device_xname(sc->sc_dev));
		psc->sc_ihs[idx] = pci_intr_establish_xname(pc, psc->sc_ihp[idx],
		    sc->sc_ipl, virtio_pci_msix_queue_intr, sc, intr_xname);
		if (psc->sc_ihs[idx] == NULL) {
			aprint_error_dev(self, "couldn't establish MSI-X for queues\n");
			goto error;
		}
	}

	/* Tell the device which vector serves which purpose. */
	if (sc->sc_version_1) {
		ret = virtio_pci_setup_msix_vectors_10(sc);
	} else {
		ret = virtio_pci_setup_msix_vectors_09(sc);
	}
	if (ret) {
		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
		goto error;
	}

	/* Report the established interrupts. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "config interrupting at %s\n", intrstr);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		kcpuset_t *affinity;
		int affinity_to, r;

		kcpuset_create(&affinity, false);

		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			/*
			 * Spread queue interrupts over CPUs, two queues
			 * per CPU (e.g. paired rx/tx queues).
			 */
			affinity_to = (qid / 2) % ncpu;

			intrstr = pci_intr_string(pc, psc->sc_ihp[n],
			    intrbuf, sizeof(intrbuf));

			kcpuset_zero(affinity);
			kcpuset_set(affinity, affinity_to);
			/* Best effort: fall back silently if distribution fails. */
			r = interrupt_distribute(psc->sc_ihs[n], affinity, NULL);
			if (r == 0) {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s affinity to %u\n",
				    qid, intrstr, affinity_to);
			} else {
				aprint_normal_dev(self,
				    "for vq #%d interrupting at %s\n",
				    qid, intrstr);
			}
		}

		kcpuset_destroy(affinity);
	} else {
		intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf));
		aprint_normal_dev(self, "queues interrupting at %s\n", intrstr);
	}

	return 0;

error:
	/* Undo whichever handlers were established before the failure. */
	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
	if (psc->sc_ihs[idx] != NULL)
		pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
	if (sc->sc_child_mq) {
		for (qid = 0; qid < sc->sc_nvqs; qid++) {
			n = idx + qid;
			if (psc->sc_ihs[n] == NULL)
				continue;
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[n]);
		}

	} else {
		if (psc->sc_ihs[idx] != NULL)
			pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]);
	}

	return -1;
}
1206
/*
 * Establish a single INTx (legacy pin) interrupt handler for the
 * device; virtio_pci_intr() then demultiplexes config-change and
 * queue notifications via the ISR register.
 *
 * Returns 0 on success, -1 if the handler could not be established.
 */
static int
virtio_pci_setup_intx_interrupt(struct virtio_softc *sc,
    struct pci_attach_args *pa)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = pa->pa_pc;
	char intrbuf[PCI_INTRSTR_LEN];
	char const *intrstr;

	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);

	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
	    sc->sc_ipl, virtio_pci_intr, sc, device_xname(sc->sc_dev));
	if (psc->sc_ihs[0] == NULL) {
		aprint_error_dev(self, "couldn't establish INTx\n");
		return -1;
	}

	intrstr = pci_intr_string(pc, psc->sc_ihp[0], intrbuf, sizeof(intrbuf));
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	return 0;
}
1232
/*
 * Allocate and establish interrupts for the device: try MSI-X first
 * (when enabled and at least two vectors are available), falling back
 * to INTx if MSI-X allocation or setup fails.
 *
 * Side effects: sets psc->sc_ihs / sc_ihs_num, and adjusts
 * psc->sc_devcfg_offset because the legacy config layout differs
 * depending on whether MSI-X is in use.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
virtio_pci_setup_interrupts(struct virtio_softc *sc)
{
	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
	device_t self = sc->sc_dev;
	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
	pcitag_t tag = psc->sc_pa.pa_tag;
	int error;
	int nmsix;
	int off;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	pcireg_t ctl;

	nmsix = pci_msix_count(psc->sc_pa.pa_pc, psc->sc_pa.pa_tag);
	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);

	/* We need at least two: one for config and the other for queues */
	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
		/* Try INTx only */
		max_type = PCI_INTR_TYPE_INTX;
		counts[PCI_INTR_TYPE_INTX] = 1;
	} else {
		/* Try MSI-X first and INTx second */
		/*
		 * Multiqueue needs one vector per queue plus the config
		 * vector; if not enough are available, drop to the
		 * shared-queue-vector layout (two vectors total).
		 */
		if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) {
			nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
		} else {
			sc->sc_child_mq = false;
		}

		if (sc->sc_child_mq == false) {
			nmsix = 2;
		}

		max_type = PCI_INTR_TYPE_MSIX;
		counts[PCI_INTR_TYPE_MSIX] = nmsix;
		counts[PCI_INTR_TYPE_MSI] = 0;
		counts[PCI_INTR_TYPE_INTX] = 1;
	}

retry:
	error = pci_intr_alloc(&psc->sc_pa, &psc->sc_ihp, counts, max_type);
	if (error != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return -1;
	}

	if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix,
		    KM_SLEEP);

		error = virtio_pci_setup_msix_interrupts(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * nmsix);
			pci_intr_release(pc, psc->sc_ihp, nmsix);

			/* Retry INTx */
			max_type = PCI_INTR_TYPE_INTX;
			counts[PCI_INTR_TYPE_INTX] = 1;
			goto retry;
		}

		psc->sc_ihs_num = nmsix;
		/* With MSI-X the device config area moves. */
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
		virtio_pci_adjust_config_region(psc);
	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
		    KM_SLEEP);

		error = virtio_pci_setup_intx_interrupt(sc, &psc->sc_pa);
		if (error != 0) {
			kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * 1);
			pci_intr_release(pc, psc->sc_ihp, 1);
			return -1;
		}

		psc->sc_ihs_num = 1;
		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
		virtio_pci_adjust_config_region(psc);

		/*
		 * NOTE: pci_get_capability() returns non-zero when the
		 * capability EXISTS, so despite the variable's name this
		 * branch is taken on success: explicitly disable the
		 * MSI-X capability while running in INTx mode.
		 */
		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
		if (error != 0) {
			ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
			ctl &= ~PCI_MSIX_CTL_ENABLE;
			pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
		}
	}

	return 0;
}
1323
1324 static void
1325 virtio_pci_free_interrupts(struct virtio_softc *sc)
1326 {
1327 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1328
1329 for (int i = 0; i < psc->sc_ihs_num; i++) {
1330 if (psc->sc_ihs[i] == NULL)
1331 continue;
1332 pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[i]);
1333 psc->sc_ihs[i] = NULL;
1334 }
1335
1336 if (psc->sc_ihs_num > 0)
1337 pci_intr_release(psc->sc_pa.pa_pc, psc->sc_ihp, psc->sc_ihs_num);
1338
1339 if (psc->sc_ihs != NULL) {
1340 kmem_free(psc->sc_ihs, sizeof(*psc->sc_ihs) * psc->sc_ihs_num);
1341 psc->sc_ihs = NULL;
1342 }
1343 psc->sc_ihs_num = 0;
1344 }
1345
1346 /*
1347 * Interrupt handler.
1348 */
1349 static int
1350 virtio_pci_intr(void *arg)
1351 {
1352 struct virtio_softc *sc = arg;
1353 struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
1354 int isr, r = 0;
1355
1356 /* check and ack the interrupt */
1357 isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
1358 if (isr == 0)
1359 return 0;
1360 if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
1361 (sc->sc_config_change != NULL))
1362 r = (sc->sc_config_change)(sc);
1363 if (sc->sc_intrhand != NULL) {
1364 if (sc->sc_soft_ih != NULL)
1365 softint_schedule(sc->sc_soft_ih);
1366 else
1367 r |= (sc->sc_intrhand)(sc);
1368 }
1369
1370 return r;
1371 }
1372
1373 static int
1374 virtio_pci_msix_queue_intr(void *arg)
1375 {
1376 struct virtio_softc *sc = arg;
1377 int r = 0;
1378
1379 if (sc->sc_intrhand != NULL) {
1380 if (sc->sc_soft_ih != NULL)
1381 softint_schedule(sc->sc_soft_ih);
1382 else
1383 r |= (sc->sc_intrhand)(sc);
1384 }
1385
1386 return r;
1387 }
1388
1389 static int
1390 virtio_pci_msix_config_intr(void *arg)
1391 {
1392 struct virtio_softc *sc = arg;
1393 int r = 0;
1394
1395 if (sc->sc_config_change != NULL)
1396 r = (sc->sc_config_change)(sc);
1397 return r;
1398 }
1399
1400 MODULE(MODULE_CLASS_DRIVER, virtio_pci, "pci,virtio");
1401
1402 #ifdef _MODULE
1403 #include "ioconf.c"
1404 #endif
1405
1406 static int
1407 virtio_pci_modcmd(modcmd_t cmd, void *opaque)
1408 {
1409 int error = 0;
1410
1411 #ifdef _MODULE
1412 switch (cmd) {
1413 case MODULE_CMD_INIT:
1414 error = config_init_component(cfdriver_ioconf_virtio_pci,
1415 cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1416 break;
1417 case MODULE_CMD_FINI:
1418 error = config_fini_component(cfdriver_ioconf_virtio_pci,
1419 cfattach_ioconf_virtio_pci, cfdata_ioconf_virtio_pci);
1420 break;
1421 default:
1422 error = ENOTTY;
1423 break;
1424 }
1425 #endif
1426
1427 return error;
1428 }
1429