/*	$NetBSD: msipic.c,v 1.10 2017/06/01 02:45:08 chs Exp $	*/

/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: msipic.c,v 1.10 2017/06/01 02:45:08 chs Exp $");

#include "opt_intrdebug.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <dev/pci/pcivar.h>

#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#include <machine/i82093reg.h>
#include <machine/i82093var.h>
#include <machine/pic.h>
#include <machine/lock.h>

#include <x86/pci/msipic.h>

#ifdef INTRDEBUG
#define MSIPICDEBUG
#endif

#ifdef MSIPICDEBUG
#define DPRINTF(msg) printf msg
#else
#define DPRINTF(msg)
#endif

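/*
 * Force posted bus_space writes to reach the device by reading back from
 * the same mapping (used for MSI-X table updates below).
 */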
#define BUS_SPACE_WRITE_FLUSH(pc, tag) (void)bus_space_read_4(pc, tag, 0)

#define MSIPICNAMEBUF 16

/*
 * A pseudo pic for a single MSI/MSI-X device.
 * Each pic and its MSI/MSI-X device are identified by a "devid", which is
 * managed by "dev_seqs" below.
 */
struct msipic {
	int mp_bus;
	int mp_dev;
	int mp_fun;

	int mp_devid;	/* The device id for the MSI/MSI-X device. */
	int mp_veccnt;	/* The number of MSI/MSI-X vectors. */

	char mp_pic_name[MSIPICNAMEBUF];	/* The MSI/MSI-X device's name. */

	struct pci_attach_args mp_pa;
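	/* The following three fields describe the mapped MSI-X table (MSI-X only). */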
	bus_space_tag_t mp_bstag;
	bus_space_handle_t mp_bshandle;
	bus_size_t mp_bssize;
	struct pic *mp_pic;

	LIST_ENTRY(msipic) mp_list;
};

static kmutex_t msipic_list_lock;

static LIST_HEAD(, msipic) msipic_list =
	LIST_HEAD_INITIALIZER(msipic_list);

/*
 * This struct manages the "devid" values so that a re-attached device gets
 * the same "devid" it used before.  If a device's bus number, device number,
 * and function number match a recorded entry, the device is assumed to be
 * re-attached.
 */
struct dev_last_used_seq {
	bool ds_using;
	int ds_bus;
	int ds_dev;
	int ds_fun;
};
/* The number of MSI/MSI-X devices supported by the system. */
#define NUM_MSI_DEVS 256
/* Record devids so the same devid is reused when a device is re-attached. */
static struct dev_last_used_seq dev_seqs[NUM_MSI_DEVS];

static int msipic_allocate_common_msi_devid(const struct pci_attach_args *);
static void msipic_release_common_msi_devid(int);

static struct pic *msipic_find_msi_pic_locked(int);
static struct pic *msipic_construct_common_msi_pic(const struct pci_attach_args *,
	struct pic *);
static void msipic_destruct_common_msi_pic(struct pic *);

static void msi_set_msictl_enablebit(struct pic *, int, int);
static void msi_hwmask(struct pic *, int);
static void msi_hwunmask(struct pic *, int);
static void msi_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msi_delroute(struct pic *, struct cpu_info *, int, int, int);

static void msix_set_vecctl_mask(struct pic *, int, int);
static void msix_hwmask(struct pic *, int);
static void msix_hwunmask(struct pic *, int);
static void msix_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msix_delroute(struct pic *, struct cpu_info *, int, int, int);

/*
 * Return a new "devid" for a device attached for the first time.
 * Return the same "devid" for a device re-attached after having been detached.
 * Return -1 if the number of attached MSI/MSI-X devices exceeds NUM_MSI_DEVS.
 */
static int
msipic_allocate_common_msi_devid(const struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int bus, dev, fun, i;

	KASSERT(mutex_owned(&msipic_list_lock));

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	/* If the device was attached before, reuse the same devid. */
	for (i = 0; i < NUM_MSI_DEVS; i++) {
		/*
		 * A 0/0/0 entry (the host bridge, which never uses MSI)
		 * has never been recorded, so stop searching here.
		 */
		if (dev_seqs[i].ds_bus == 0
		    && dev_seqs[i].ds_dev == 0
		    && dev_seqs[i].ds_fun == 0)
			break;

		if (dev_seqs[i].ds_bus == bus
		    && dev_seqs[i].ds_dev == dev
		    && dev_seqs[i].ds_fun == fun) {
			dev_seqs[i].ds_using = true;
			return i;
		}
	}

	for (i = 0; i < NUM_MSI_DEVS; i++) {
		if (dev_seqs[i].ds_using == false) {
			dev_seqs[i].ds_using = true;
			dev_seqs[i].ds_bus = bus;
			dev_seqs[i].ds_dev = dev;
			dev_seqs[i].ds_fun = fun;
			return i;
		}
	}

	DPRINTF(("too many MSI devices.\n"));
	return -1;
}

/*
 * Mark the "devid" unused, but keep its record so the same "devid" can be
 * reused when the device is re-attached.
 */
static void
msipic_release_common_msi_devid(int devid)
{

	KASSERT(mutex_owned(&msipic_list_lock));

	if (devid < 0 || NUM_MSI_DEVS <= devid) {
		DPRINTF(("%s: invalid devid.\n", __func__));
		return;
	}

	dev_seqs[devid].ds_using = false;
	/* Keep ds_* to reuse the same devid for the same device. */
}

static struct pic *
msipic_find_msi_pic_locked(int devid)
{
	struct msipic *mpp;

	KASSERT(mutex_owned(&msipic_list_lock));

	LIST_FOREACH(mpp, &msipic_list, mp_list) {
		if (mpp->mp_devid == devid)
			return mpp->mp_pic;
	}
	return NULL;
}

/*
 * Return the msi_pic of a device that is already registered.
 * If the device is not registered yet, return NULL.
 */
struct pic *
msipic_find_msi_pic(int devid)
{
	struct pic *msipic;

	mutex_enter(&msipic_list_lock);
	msipic = msipic_find_msi_pic_locked(devid);
	mutex_exit(&msipic_list_lock);

	return msipic;
}

/*
 * Common construction code for MSI and MSI-X pics.
 */
static struct pic *
msipic_construct_common_msi_pic(const struct pci_attach_args *pa,
	struct pic *pic_tmpl)
{
	struct pic *pic;
	struct msipic *msipic;
	int devid;

	pic = kmem_alloc(sizeof(*pic), KM_SLEEP);
	msipic = kmem_zalloc(sizeof(*msipic), KM_SLEEP);

	mutex_enter(&msipic_list_lock);

	devid = msipic_allocate_common_msi_devid(pa);
	if (devid == -1) {
		mutex_exit(&msipic_list_lock);
		kmem_free(pic, sizeof(*pic));
		kmem_free(msipic, sizeof(*msipic));
		return NULL;
	}

	memcpy(pic, pic_tmpl, sizeof(*pic));
	pic->pic_edge_stubs =
	    x2apic_mode ? x2apic_edge_stubs : ioapic_edge_stubs;
	pic->pic_msipic = msipic;
	msipic->mp_pic = pic;
	pci_decompose_tag(pa->pa_pc, pa->pa_tag,
	    &msipic->mp_bus, &msipic->mp_dev, &msipic->mp_fun);
	memcpy(&msipic->mp_pa, pa, sizeof(msipic->mp_pa));
	msipic->mp_devid = devid;
	/*
	 * pci_msi{,x}_alloc() must be called only once in the device driver.
	 */
	KASSERT(msipic_find_msi_pic_locked(msipic->mp_devid) == NULL);

	LIST_INSERT_HEAD(&msipic_list, msipic, mp_list);

	mutex_exit(&msipic_list_lock);

	return pic;
}

static void
msipic_destruct_common_msi_pic(struct pic *msi_pic)
{
	struct msipic *msipic;

	if (msi_pic == NULL)
		return;

	msipic = msi_pic->pic_msipic;
	mutex_enter(&msipic_list_lock);
	LIST_REMOVE(msipic, mp_list);
	msipic_release_common_msi_devid(msipic->mp_devid);
	mutex_exit(&msipic_list_lock);

	kmem_free(msipic, sizeof(*msipic));
	kmem_free(msi_pic, sizeof(*msi_pic));
}

/*
 * Return true if the pic is an MSI/MSI-X pic.
 */
bool
msipic_is_msi_pic(struct pic *pic)
{

	return (pic->pic_msipic != NULL);
}

/*
 * Return the MSI/MSI-X devid, which is unique to each device.
 */
int
msipic_get_devid(struct pic *pic)
{

	KASSERT(msipic_is_msi_pic(pic));

	return pic->pic_msipic->mp_devid;
}

#define MSI_MSICTL_ENABLE 1
#define MSI_MSICTL_DISABLE 0
static void
msi_set_msictl_enablebit(struct pic *pic, int msi_vec, int flag)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t ctl;
	int off, err __diagused;

	pa = &pic->pic_msipic->mp_pa;
	pc = pa->pa_pc;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * MSI can establish only one vector at a time, so use the
	 * whole-device enable bit instead of a per-vector mask bit.
	 */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (flag == MSI_MSICTL_ENABLE)
		ctl |= PCI_MSI_CTL_MSI_ENABLE;
	else
		ctl &= ~PCI_MSI_CTL_MSI_ENABLE;

	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
msi_hwmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_DISABLE);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msi_hwunmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_ENABLE);
}

static void
msi_addroute(struct pic *pic, struct cpu_info *ci,
    int unused, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	pa = &pic->pic_msipic->mp_pa;
	pc = pa->pa_pc;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * The "cpuid" in the MSI address is the local APIC ID.  In NetBSD,
	 * it is the same as ci->ci_cpuid.
	 */
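	/*
	 * Address layout: bits 31:20 are the fixed 0xFEE base and bits 19:12
	 * hold the destination local APIC ID.  Data layout: bits 7:0 hold
	 * the IDT vector, bits 10:8 the delivery mode and bit 15 the trigger
	 * mode.
	 */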
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* When the trigger mode is edge, the level bit is ignored. */
	data = __SHIFTIN(idt_vec, LAPIC_MSIDATA_VECTOR_MASK)
	    | LAPIC_MSIDATA_TRGMODE_EDGE | LAPIC_MSIDATA_DM_FIXED;

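	/*
	 * The message data register is located at a different offset
	 * depending on whether the function implements 64-bit message
	 * addresses, so check PCI_MSI_CTL_64BIT_ADDR first.
	 */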
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI, 0);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, data);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, data);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msi_delroute(struct pic *pic, struct cpu_info *ci,
    int msi_vec, int idt_vec, int type)
{

	msi_hwmask(pic, msi_vec);
}

/*
 * Template for MSI pic.
 * .pic_msipic is set later in msipic_construct_common_msi_pic().
 */
static struct pic msi_pic_tmpl = {
	.pic_type = PIC_MSI,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED,	/* not used for msi_pic */
	.pic_hwmask = msi_hwmask,
	.pic_hwunmask = msi_hwunmask,
	.pic_addroute = msi_addroute,
	.pic_delroute = msi_delroute,
};

/*
 * Create a pseudo pic for an MSI device.
 */
struct pic *
msipic_construct_msi_pic(const struct pci_attach_args *pa)
{
	struct pic *msi_pic;
	char pic_name_buf[MSIPICNAMEBUF];

	msi_pic = msipic_construct_common_msi_pic(pa, &msi_pic_tmpl);
	if (msi_pic == NULL) {
		DPRINTF(("cannot allocate MSI pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msi%d",
	    msi_pic->pic_msipic->mp_devid);
	strncpy(msi_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msi_pic->pic_name = msi_pic->pic_msipic->mp_pic_name;

	return msi_pic;
}

/*
 * Delete the pseudo pic for an MSI device.
 */
void
msipic_destruct_msi_pic(struct pic *msi_pic)
{

	msipic_destruct_common_msi_pic(msi_pic);
}

#define MSIX_VECCTL_HWMASK 1
#define MSIX_VECCTL_HWUNMASK 0
static void
msix_set_vecctl_mask(struct pic *pic, int msix_vec, int flag)
{
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	uint32_t vecctl;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d\n",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	vecctl = bus_space_read_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	if (flag == MSIX_VECCTL_HWMASK)
		vecctl |= PCI_MSIX_VECTCTL_MASK;
	else
		vecctl &= ~PCI_MSIX_VECTCTL_MASK;

	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, vecctl);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);
}

static void
msix_hwmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWMASK);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msix_hwunmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWUNMASK);
}

static void
msix_addroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d\n",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	pa = &pic->pic_msipic->mp_pa;
	pc = pa->pa_pc;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
	KASSERT(err != 0);

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * The "cpuid" in the MSI-X address is the local APIC ID.  In NetBSD,
	 * it is the same as ci->ci_cpuid.
	 */
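	/*
	 * The address/data encoding is the same as for MSI; see
	 * msi_addroute() above.
	 */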
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* When the trigger mode is edge, the level bit is ignored. */
	data = __SHIFTIN(idt_vec, LAPIC_MSIDATA_VECTOR_MASK)
	    | LAPIC_MSIDATA_TRGMODE_EDGE | LAPIC_MSIDATA_DM_FIXED;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, addr);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, 0);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_DATA, data);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, 0);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

/*
 * Do not call pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be called again before pic->hwunmask().
 */
static void
msix_delroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int vec, int type)
{

	msix_hwmask(pic, msix_vec);
}

/*
 * Template for MSI-X pic.
 * .pic_msipic is set later in msipic_construct_common_msi_pic().
 */
static struct pic msix_pic_tmpl = {
	.pic_type = PIC_MSIX,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED,	/* not used for msix_pic */
	.pic_hwmask = msix_hwmask,
	.pic_hwunmask = msix_hwunmask,
	.pic_addroute = msix_addroute,
	.pic_delroute = msix_delroute,
};

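/*
 * Create a pseudo pic for an MSI-X device.
 */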
struct pic *
msipic_construct_msix_pic(const struct pci_attach_args *pa)
{
	struct pic *msix_pic;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	pcireg_t tbl;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	bus_size_t bssize;
	size_t table_size;
	uint32_t table_offset;
	u_int memtype;
	bus_addr_t memaddr;
	int flags;
	int bir, bar, err, off, table_nentry;
	char pic_name_buf[MSIPICNAMEBUF];

	table_nentry = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (table_nentry == 0) {
		DPRINTF(("the MSI-X table has no entries.\n"));
		return NULL;
	}

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL) == 0) {
		DPRINTF(("%s: no MSI-X capability.\n", __func__));
		return NULL;
	}

	msix_pic = msipic_construct_common_msi_pic(pa, &msix_pic_tmpl);
	if (msix_pic == NULL) {
		DPRINTF(("cannot allocate MSI-X pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msix%d",
	    msix_pic->pic_msipic->mp_devid);
	strncpy(msix_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msix_pic->pic_name = msix_pic->pic_msipic->mp_pic_name;

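	/*
	 * The Table Offset/Table BIR register holds the offset of the MSI-X
	 * table within the BAR selected by the BIR field in its low bits.
	 */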
	tbl = pci_conf_read(pc, tag, off + PCI_MSIX_TBLOFFSET);
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	bir = tbl & PCI_MSIX_PBABIR_MASK;
	switch (bir) {
	case 0:
		bar = PCI_BAR0;
		break;
	case 1:
		bar = PCI_BAR1;
		break;
	case 2:
		bar = PCI_BAR2;
		break;
	case 3:
		bar = PCI_BAR3;
		break;
	case 4:
		bar = PCI_BAR4;
		break;
	case 5:
		bar = PCI_BAR5;
		break;
	default:
		aprint_error("detected an illegal device! "
		    "The device uses a reserved BIR value.\n");
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	memtype = pci_mapreg_type(pc, tag, bar);
	/*
	 * Each PCI_MSIX_TABLE_ENTRY_SIZE entry consists of:
	 * - Message Address (lower 32bit)
	 * - Message Address (upper 32bit)
	 * - Message Data (32bit)
	 * - Vector Control (32bit)
	 */
	table_size = table_nentry * PCI_MSIX_TABLE_ENTRY_SIZE;
#if 0
	err = pci_mapreg_submap(pa, bar, memtype, BUS_SPACE_MAP_LINEAR,
	    roundup(table_size, PAGE_SIZE), table_offset,
	    &bstag, &bshandle, NULL, &bssize);
#else
	/*
	 * Workaround for the PCI prefetchable bit.  Some chips (e.g. Intel
	 * 82599) report SERR and MSI-X does not work if the table is mapped
	 * prefetchable.  The problem might not be in this driver but in our
	 * PCI common code or in VMs.  Until the real cause is found, ignore
	 * the prefetchable bit.
	 */
	if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, bar, memtype,
	    &memaddr, NULL, &flags) != 0) {
		DPRINTF(("cannot get map info.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
		DPRINTF(("clear prefetchable bit\n"));
		flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
	}
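	/* Map only the MSI-X table region, rounded up to a page boundary. */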
	bssize = roundup(table_size, PAGE_SIZE);
	err = bus_space_map(pa->pa_memt, memaddr + table_offset, bssize, flags,
	    &bshandle);
	bstag = pa->pa_memt;
#endif
	if (err) {
		DPRINTF(("cannot map msix table.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	msix_pic->pic_msipic->mp_bstag = bstag;
	msix_pic->pic_msipic->mp_bshandle = bshandle;
	msix_pic->pic_msipic->mp_bssize = bssize;

	return msix_pic;
}

/*
 * Delete the pseudo pic for an MSI-X device.
 */
void
msipic_destruct_msix_pic(struct pic *msix_pic)
{
	struct msipic *msipic;

	KASSERT(msipic_is_msi_pic(msix_pic));
	KASSERT(msix_pic->pic_type == PIC_MSIX);

	msipic = msix_pic->pic_msipic;
	bus_space_unmap(msipic->mp_bstag, msipic->mp_bshandle,
	    msipic->mp_bssize);

	msipic_destruct_common_msi_pic(msix_pic);
}

/*
 * Set the number of MSI vectors for a pseudo MSI pic.
 */
int
msipic_set_msi_vectors(struct pic *msi_pic, pci_intr_handle_t *pihs,
    int count)
{

	KASSERT(msipic_is_msi_pic(msi_pic));

	msi_pic->pic_msipic->mp_veccnt = count;
	return 0;
}

/*
 * Initialize the system to use MSI/MSI-X.
 */
void
msipic_init(void)
{

	mutex_init(&msipic_list_lock, MUTEX_DEFAULT, IPL_NONE);
}