/*	$NetBSD: msipic.c,v 1.21 2020/04/25 15:26:18 bouyer Exp $	*/

/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: msipic.c,v 1.21 2020/04/25 15:26:18 bouyer Exp $");

#include "opt_intrdebug.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/bitops.h>

#include <dev/pci/pcivar.h>

#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#include <machine/i82093reg.h>
#include <machine/i82093var.h>
#include <machine/pic.h>
#include <machine/lock.h>

#include <x86/pci/msipic.h>

#ifdef INTRDEBUG
#define MSIPICDEBUG
#endif

#ifdef MSIPICDEBUG
#define DPRINTF(msg) printf msg
#else
#define DPRINTF(msg)
#endif

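/*
 * Read a register back to push any posted bus_space writes (e.g. to an
 * MSI-X table) out to the device before continuing.
 */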
#define BUS_SPACE_WRITE_FLUSH(pc, tag) (void)bus_space_read_4(pc, tag, 0)

#define MSIPICNAMEBUF 16

/*
 * A pseudo pic for a single MSI/MSI-X device.  Each pic and its MSI/MSI-X
 * device are identified by a "devid", which is managed by "dev_seqs" below.
 */
struct msipic {
	int mp_bus;
	int mp_dev;
	int mp_fun;

	int mp_devid; /* The device id for the MSI/MSI-X device. */
	int mp_veccnt; /* The number of MSI/MSI-X vectors. */

	char mp_pic_name[MSIPICNAMEBUF]; /* The MSI/MSI-X device's name. */

	struct pci_attach_args mp_pa;
	bus_space_tag_t mp_bstag;
	bus_space_handle_t mp_bshandle;
	bus_size_t mp_bssize;
	struct pic *mp_pic;

	LIST_ENTRY(msipic) mp_list;
};
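
/*
 * A pic allocated by msipic_construct_common_msi_pic() and its struct
 * msipic are paired one-to-one: pic->pic_msipic points to the msipic and
 * msipic->mp_pic points back to the pic.
 */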

static kmutex_t msipic_list_lock;

static LIST_HEAD(, msipic) msipic_list =
	LIST_HEAD_INITIALIZER(msipic_list);

/*
 * This struct manages "devid"s so that the same "devid" is used for a
 * device that is re-attached.  If a device's bus, device, and function
 * numbers match a recorded entry, it is assumed to be the same device
 * re-attached.
 */
struct dev_last_used_seq {
	bool ds_using;
	int ds_bus;
	int ds_dev;
	int ds_fun;
};
/* The number of MSI/MSI-X devices supported by the system. */
#define NUM_MSI_DEVS 256
/* Record devids so the same devid is reused when a device is re-attached. */
static struct dev_last_used_seq dev_seqs[NUM_MSI_DEVS];
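
/*
 * Example: the first MSI/MSI-X capable device to attach (say 03:00.0) is
 * recorded in dev_seqs[0] and gets devid 0.  If it is detached and later
 * re-attached, msipic_allocate_common_msi_devid() finds the matching
 * bus/dev/fun entry and hands out devid 0 again.
 */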

static int msipic_allocate_common_msi_devid(const struct pci_attach_args *);
static void msipic_release_common_msi_devid(int);

static struct pic *msipic_find_msi_pic_locked(int);
static struct pic *msipic_construct_common_msi_pic(const struct pci_attach_args *,
    struct pic *);
static void msipic_destruct_common_msi_pic(struct pic *);

static void msi_set_msictl_enablebit(struct pic *, int, int);
static void msi_hwmask(struct pic *, int);
static void msi_hwunmask(struct pic *, int);
static void msi_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msi_delroute(struct pic *, struct cpu_info *, int, int, int);

static void msix_set_vecctl_mask(struct pic *, int, int);
static void msix_hwmask(struct pic *, int);
static void msix_hwunmask(struct pic *, int);
static void msix_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msix_delroute(struct pic *, struct cpu_info *, int, int, int);

/*
 * Return a new "devid" for a device attached for the first time.
 * Return the same "devid" for a device re-attached after having been
 * detached.
 * Return -1 if the number of attached MSI/MSI-X devices would exceed
 * NUM_MSI_DEVS.
 */
static int
msipic_allocate_common_msi_devid(const struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int bus, dev, fun, i;

	KASSERT(mutex_owned(&msipic_list_lock));

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	/* If the device was attached before, reuse the same devid. */
	for (i = 0; i < NUM_MSI_DEVS; i++) {
		/*
		 * An all-zero entry (0:0:0 is the host bridge, which never
		 * uses MSI) marks the end of the recorded devices.
		 */
		if (dev_seqs[i].ds_bus == 0
		    && dev_seqs[i].ds_dev == 0
		    && dev_seqs[i].ds_fun == 0)
			break;

		if (dev_seqs[i].ds_bus == bus
		    && dev_seqs[i].ds_dev == dev
		    && dev_seqs[i].ds_fun == fun) {
			dev_seqs[i].ds_using = true;
			return i;
		}
	}

	for (i = 0; i < NUM_MSI_DEVS; i++) {
		if (dev_seqs[i].ds_using == 0) {
			dev_seqs[i].ds_using = true;
			dev_seqs[i].ds_bus = bus;
			dev_seqs[i].ds_dev = dev;
			dev_seqs[i].ds_fun = fun;
			return i;
		}
	}

	DPRINTF(("too many MSI devices.\n"));
	return -1;
}

/*
 * Mark the "devid" unused, but keep it reserved so it can be reused
 * when the device is re-attached.
 */
static void
msipic_release_common_msi_devid(int devid)
{

	KASSERT(mutex_owned(&msipic_list_lock));

	if (devid < 0 || NUM_MSI_DEVS <= devid) {
		DPRINTF(("%s: invalid devid.\n", __func__));
		return;
	}

	dev_seqs[devid].ds_using = false;
	/* Keep ds_* to reuse the same devid for the same device. */
}

static struct pic *
msipic_find_msi_pic_locked(int devid)
{
	struct msipic *mpp;

	KASSERT(mutex_owned(&msipic_list_lock));

	LIST_FOREACH(mpp, &msipic_list, mp_list) {
		if (mpp->mp_devid == devid)
			return mpp->mp_pic;
	}
	return NULL;
}

/*
 * Return the msi_pic of a device that is already registered.
 * If the device is not registered yet, return NULL.
 */
struct pic *
msipic_find_msi_pic(int devid)
{
	struct pic *msipic;

	mutex_enter(&msipic_list_lock);
	msipic = msipic_find_msi_pic_locked(devid);
	mutex_exit(&msipic_list_lock);

	return msipic;
}

/*
 * Construction steps common to MSI and MSI-X pics.
 */
static struct pic *
msipic_construct_common_msi_pic(const struct pci_attach_args *pa,
    struct pic *pic_tmpl)
{
	struct pic *pic;
	struct msipic *msipic;
	int devid;

	pic = kmem_alloc(sizeof(*pic), KM_SLEEP);
	msipic = kmem_zalloc(sizeof(*msipic), KM_SLEEP);

	mutex_enter(&msipic_list_lock);

	devid = msipic_allocate_common_msi_devid(pa);
	if (devid == -1) {
		mutex_exit(&msipic_list_lock);
		kmem_free(pic, sizeof(*pic));
		kmem_free(msipic, sizeof(*msipic));
		return NULL;
	}

	memcpy(pic, pic_tmpl, sizeof(*pic));
	pic->pic_edge_stubs
	    = x2apic_mode ? x2apic_edge_stubs : ioapic_edge_stubs;
	pic->pic_msipic = msipic;
	msipic->mp_pic = pic;
	pci_decompose_tag(pa->pa_pc, pa->pa_tag,
	    &msipic->mp_bus, &msipic->mp_dev, &msipic->mp_fun);
	memcpy(&msipic->mp_pa, pa, sizeof(msipic->mp_pa));
	msipic->mp_devid = devid;
	/*
	 * pci_msi{,x}_alloc() must be called only once in the device driver.
	 */
	KASSERT(msipic_find_msi_pic_locked(msipic->mp_devid) == NULL);

	LIST_INSERT_HEAD(&msipic_list, msipic, mp_list);

	mutex_exit(&msipic_list_lock);

	return pic;
}

static void
msipic_destruct_common_msi_pic(struct pic *msi_pic)
{
	struct msipic *msipic;

	if (msi_pic == NULL)
		return;

	msipic = msi_pic->pic_msipic;
	mutex_enter(&msipic_list_lock);
	LIST_REMOVE(msipic, mp_list);
	msipic_release_common_msi_devid(msipic->mp_devid);
	mutex_exit(&msipic_list_lock);

	kmem_free(msipic, sizeof(*msipic));
	kmem_free(msi_pic, sizeof(*msi_pic));
}

/*
 * Return whether the pic is an MSI/MSI-X pic.
 */
bool
msipic_is_msi_pic(struct pic *pic)
{

	return (pic->pic_msipic != NULL);
}

/*
 * Return the MSI/MSI-X devid, which is unique for each device.
 */
int
msipic_get_devid(struct pic *pic)
{

	KASSERT(msipic_is_msi_pic(pic));

	return pic->pic_msipic->mp_devid;
}

#define MSI_MSICTL_ENABLE 1
#define MSI_MSICTL_DISABLE 0
static void
msi_set_msictl_enablebit(struct pic *pic, int msi_vec, int flag)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t ctl;
	int off, err __diagused;

	pc = NULL;
	pa = &pic->pic_msipic->mp_pa;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * MSI can establish only one vector at a time, so use the
	 * whole-device MSI enable bit instead of a per-vector mask bit.
	 */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (flag == MSI_MSICTL_ENABLE)
		ctl |= PCI_MSI_CTL_MSI_ENABLE;
	else
		ctl &= ~PCI_MSI_CTL_MSI_ENABLE;

	pci_conf_write(pc, tag, off, ctl);
}

static void
msi_hwmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_DISABLE);
}

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called before pic->hwunmask().
 */
static void
msi_hwunmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_ENABLE);
}

static void
msi_addroute(struct pic *pic, struct cpu_info *ci,
    int unused, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	pc = NULL;
	pa = &pic->pic_msipic->mp_pa;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * The "cpuid" in the MSI address is the local APIC ID.  In NetBSD,
	 * the ID is the same as ci->ci_cpuid.
	 */
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* With edge trigger mode, the level bit is ignored. */
	data = __SHIFTIN(idt_vec, LAPIC_VECTOR_MASK)
	    | LAPIC_TRIGMODE_EDGE | LAPIC_DLMODE_FIXED;

	/*
	 * The message data register is 16 bits wide if the extended message
	 * data is not implemented.  If it is 16 bits wide and per-vector
	 * masking is not supported, the location of the upper 16 bits is
	 * outside the MSI capability structure's range.  The PCI spec says
	 * the upper 16 bits are driven to 0 if the message data register is
	 * 16 bits wide, so it is fine to write the register regardless of
	 * the value of the upper 16 bits.
	 */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI, 0);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, data);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, data);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}
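
/*
 * Rough layout of the address/data values programmed above (Intel SDM
 * Vol. 3, 10.11); the bit positions are those of the LAPIC_MSI* macros:
 *   address = 0xfee00000 | (destination local APIC ID << 12)
 *             (RH/DM bits left 0, i.e. physical destination mode)
 *   data    = edge trigger mode | fixed delivery mode | IDT vector
 */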

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called before pic->hwunmask().
 */
static void
msi_delroute(struct pic *pic, struct cpu_info *ci,
    int msi_vec, int idt_vec, int type)
{

	msi_hwmask(pic, msi_vec);
}

/*
 * Template for MSI pics.
 * .pic_msipic is set later, in msipic_construct_common_msi_pic().
 */
static struct pic msi_pic_tmpl = {
	.pic_type = PIC_MSI,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED,	/* not used for msi_pic */
	.pic_hwmask = msi_hwmask,
	.pic_hwunmask = msi_hwunmask,
	.pic_addroute = msi_addroute,
	.pic_delroute = msi_delroute,
	.pic_intr_get_devname = x86_intr_get_devname,
	.pic_intr_get_assigned = x86_intr_get_assigned,
	.pic_intr_get_count = x86_intr_get_count,
};

/*
 * Create a pseudo pic for an MSI device.
 */
struct pic *
msipic_construct_msi_pic(const struct pci_attach_args *pa)
{
	struct pic *msi_pic;
	char pic_name_buf[MSIPICNAMEBUF];

	msi_pic = msipic_construct_common_msi_pic(pa, &msi_pic_tmpl);
	if (msi_pic == NULL) {
		DPRINTF(("cannot allocate MSI pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msi%d",
	    msi_pic->pic_msipic->mp_devid);
	strncpy(msi_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msi_pic->pic_name = msi_pic->pic_msipic->mp_pic_name;

	return msi_pic;
}

/*
 * Delete the pseudo pic for an MSI device.
 */
void
msipic_destruct_msi_pic(struct pic *msi_pic)
{

	msipic_destruct_common_msi_pic(msi_pic);
}

#define MSIX_VECCTL_HWMASK 1
#define MSIX_VECCTL_HWUNMASK 0
static void
msix_set_vecctl_mask(struct pic *pic, int msix_vec, int flag)
{
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	uint32_t vecctl;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d\n",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	vecctl = bus_space_read_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	if (flag == MSIX_VECCTL_HWMASK)
		vecctl |= PCI_MSIX_VECTCTL_MASK;
	else
		vecctl &= ~PCI_MSIX_VECTCTL_MASK;

	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, vecctl);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);
}

static void
msix_hwmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWMASK);
}

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called before pic->hwunmask().
 */
static void
msix_hwunmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWUNMASK);
}

static void
msix_addroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d\n",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	pa = &pic->pic_msipic->mp_pa;
	pc = pa->pa_pc;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
	KASSERT(err != 0);

	/* Disable MSI-X before writing the MSI-X table. */
	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * The "cpuid" in the MSI-X address is the local APIC ID.  In NetBSD,
	 * the ID is the same as ci->ci_cpuid.
	 */
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* With edge trigger mode, the level bit is ignored. */
	data = __SHIFTIN(idt_vec, LAPIC_VECTOR_MASK)
	    | LAPIC_TRIGMODE_EDGE | LAPIC_DLMODE_FIXED;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, addr);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, 0);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_DATA, data);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, 0);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}
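
/*
 * For reference, each 16-byte MSI-X table entry written above is laid out
 * as follows (byte offsets from entry_base, per the PCI spec):
 *   +0x0 Message Address (low 32 bits)
 *   +0x4 Message Address (high 32 bits)
 *   +0x8 Message Data
 *   +0xc Vector Control (bit 0 is the per-vector mask)
 */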

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called before pic->hwunmask().
 */
static void
msix_delroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int vec, int type)
{

	msix_hwmask(pic, msix_vec);
}

/*
 * Template for MSI-X pics.
 * .pic_msipic is set later, in msipic_construct_common_msi_pic().
 */
static struct pic msix_pic_tmpl = {
	.pic_type = PIC_MSIX,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED,	/* not used for msix_pic */
	.pic_hwmask = msix_hwmask,
	.pic_hwunmask = msix_hwunmask,
	.pic_addroute = msix_addroute,
	.pic_delroute = msix_delroute,
	.pic_intr_get_devname = x86_intr_get_devname,
	.pic_intr_get_assigned = x86_intr_get_assigned,
	.pic_intr_get_count = x86_intr_get_count,
};

struct pic *
msipic_construct_msix_pic(const struct pci_attach_args *pa)
{
	struct pic *msix_pic;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	pcireg_t tbl;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	bus_size_t bssize;
	size_t table_size;
	uint32_t table_offset;
	u_int memtype;
	bus_addr_t memaddr;
	int flags;
	int bir, bar, err, off, table_nentry;
	char pic_name_buf[MSIPICNAMEBUF];

	table_nentry = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (table_nentry == 0) {
		DPRINTF(("MSI-X table entry count is 0.\n"));
		return NULL;
	}

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL) == 0) {
		DPRINTF(("%s: no MSI-X capability\n", __func__));
		return NULL;
	}

	msix_pic = msipic_construct_common_msi_pic(pa, &msix_pic_tmpl);
	if (msix_pic == NULL) {
		DPRINTF(("cannot allocate MSI-X pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msix%d",
	    msix_pic->pic_msipic->mp_devid);
	strncpy(msix_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msix_pic->pic_name = msix_pic->pic_msipic->mp_pic_name;

	tbl = pci_conf_read(pc, tag, off + PCI_MSIX_TBLOFFSET);
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	bir = tbl & PCI_MSIX_TBLBIR_MASK;
	switch (bir) {
	case 0:
		bar = PCI_BAR0;
		break;
	case 1:
		bar = PCI_BAR1;
		break;
	case 2:
		bar = PCI_BAR2;
		break;
	case 3:
		bar = PCI_BAR3;
		break;
	case 4:
		bar = PCI_BAR4;
		break;
	case 5:
		bar = PCI_BAR5;
		break;
	default:
		aprint_error("detected an invalid device: "
		    "it uses a reserved MSI-X BIR value.\n");
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	memtype = pci_mapreg_type(pc, tag, bar);
	/*
	 * Each PCI_MSIX_TABLE_ENTRY_SIZE-byte entry consists of the
	 * following fields:
	 * - Vector Control (32bit)
	 * - Message Data (32bit)
	 * - Message Upper Address (32bit)
	 * - Message Lower Address (32bit)
	 */
	table_size = table_nentry * PCI_MSIX_TABLE_ENTRY_SIZE;
#if 0
	err = pci_mapreg_submap(pa, bar, memtype, BUS_SPACE_MAP_LINEAR,
	    roundup(table_size, PAGE_SIZE), table_offset,
	    &bstag, &bshandle, NULL, &bssize);
#else
	/*
	 * Workaround for the PCI prefetchable bit.  Some chips (e.g. Intel
	 * 82599) report SERR and MSI-X doesn't work.  This problem might not
	 * be the driver's bug but a bug in our PCI common part or in VMs.
	 * Until we find the real reason, ignore the prefetchable bit.
	 */
	if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, bar, memtype,
	    &memaddr, NULL, &flags) != 0) {
		DPRINTF(("cannot get map info.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
		DPRINTF(("clear prefetchable bit\n"));
		flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
	}
	bssize = roundup(table_size, PAGE_SIZE);
	err = _x86_memio_map(pa->pa_memt, memaddr + table_offset, bssize, flags,
	    &bshandle);
	bstag = pa->pa_memt;
#endif
	if (err) {
		DPRINTF(("cannot map msix table.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	msix_pic->pic_msipic->mp_bstag = bstag;
	msix_pic->pic_msipic->mp_bshandle = bshandle;
	msix_pic->pic_msipic->mp_bssize = bssize;

	return msix_pic;
}

/*
 * Delete the pseudo pic for an MSI-X device.
 */
void
msipic_destruct_msix_pic(struct pic *msix_pic)
{
	struct msipic *msipic;

	KASSERT(msipic_is_msi_pic(msix_pic));
	KASSERT(msix_pic->pic_type == PIC_MSIX);

	msipic = msix_pic->pic_msipic;
	_x86_memio_unmap(msipic->mp_bstag, msipic->mp_bshandle,
	    msipic->mp_bssize, NULL);

	msipic_destruct_common_msi_pic(msix_pic);
}

/*
 * Set the number of MSI vectors for a pseudo MSI pic.
 */
int
msipic_set_msi_vectors(struct pic *msi_pic, pci_intr_handle_t *pihs,
    int count)
{

	KASSERT(msipic_is_msi_pic(msi_pic));

	if (msi_pic->pic_type == PIC_MSI) {
		pci_chipset_tag_t pc;
		struct pci_attach_args *pa;
		pcitag_t tag;
		int off, err __diagused;
		pcireg_t ctl;

		pc = NULL;
		pa = &msi_pic->pic_msipic->mp_pa;
		tag = pa->pa_tag;
		err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
		KASSERT(err != 0);

		ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
		ctl &= ~PCI_MSI_CTL_MME_MASK;
		ctl |= __SHIFTIN(ilog2(count), PCI_MSI_CTL_MME_MASK);
		pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
	}

	msi_pic->pic_msipic->mp_veccnt = count;
	return 0;
}
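
/*
 * The MME (Multiple Message Enable) field written above holds log2 of the
 * number of vectors the device may use, hence ilog2(count); count is
 * expected to be a power of two (1, 2, 4, ..., 32).
 */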

/*
 * Initialize the system to use MSI/MSI-X.
 */
void
msipic_init(void)
{

	mutex_init(&msipic_list_lock, MUTEX_DEFAULT, IPL_NONE);
}