/*	$NetBSD: msipic.c,v 1.11.6.1 2019/06/10 22:06:53 christos Exp $	*/

/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: msipic.c,v 1.11.6.1 2019/06/10 22:06:53 christos Exp $");

#include "opt_intrdebug.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/mutex.h>

#include <dev/pci/pcivar.h>

#include <machine/i82489reg.h>
#include <machine/i82489var.h>
#include <machine/i82093reg.h>
#include <machine/i82093var.h>
#include <machine/pic.h>
#include <machine/lock.h>

#include <x86/pci/msipic.h>

#ifdef INTRDEBUG
#define MSIPICDEBUG
#endif

#ifdef MSIPICDEBUG
#define DPRINTF(msg) printf msg
#else
#define DPRINTF(msg)
#endif

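/*
 * Force preceding posted writes to the mapped MSI-X table to complete by
 * reading the mapping back; the value read is discarded.
 */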
#define BUS_SPACE_WRITE_FLUSH(pc, tag) (void)bus_space_read_4(pc, tag, 0)

#define MSIPICNAMEBUF 16

/*
 * A pseudo pic for a single MSI/MSI-X device.
 * Each pic and its MSI/MSI-X device are identified by a "devid", which
 * is managed by "dev_seqs" below.
 */
struct msipic {
	int mp_bus;
	int mp_dev;
	int mp_fun;

	int mp_devid;	/* The device id for the MSI/MSI-X device. */
	int mp_veccnt;	/* The number of MSI/MSI-X vectors. */

	char mp_pic_name[MSIPICNAMEBUF]; /* The MSI/MSI-X device's name. */

	struct pci_attach_args mp_pa;
	bus_space_tag_t mp_bstag;
	bus_space_handle_t mp_bshandle;
	bus_size_t mp_bssize;
	struct pic *mp_pic;

	LIST_ENTRY(msipic) mp_list;
};

static kmutex_t msipic_list_lock;

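/* List of all MSI/MSI-X pseudo pics, protected by msipic_list_lock. */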
static LIST_HEAD(, msipic) msipic_list =
	LIST_HEAD_INITIALIZER(msipic_list);

/*
 * This struct manages the "devid"s so that a re-attached device gets the
 * same "devid" again.  A device is assumed to be re-attached if its bus
 * number, device number, and function number all match a recorded entry.
 */
struct dev_last_used_seq {
	bool ds_using;
	int ds_bus;
	int ds_dev;
	int ds_fun;
};
/* The number of MSI/MSI-X devices supported by the system. */
#define NUM_MSI_DEVS 256
/* Record devids to reuse the same devid when a device is re-attached. */
static struct dev_last_used_seq dev_seqs[NUM_MSI_DEVS];

static int msipic_allocate_common_msi_devid(const struct pci_attach_args *);
static void msipic_release_common_msi_devid(int);

static struct pic *msipic_find_msi_pic_locked(int);
static struct pic *msipic_construct_common_msi_pic(const struct pci_attach_args *,
    struct pic *);
static void msipic_destruct_common_msi_pic(struct pic *);

static void msi_set_msictl_enablebit(struct pic *, int, int);
static void msi_hwmask(struct pic *, int);
static void msi_hwunmask(struct pic *, int);
static void msi_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msi_delroute(struct pic *, struct cpu_info *, int, int, int);

static void msix_set_vecctl_mask(struct pic *, int, int);
static void msix_hwmask(struct pic *, int);
static void msix_hwunmask(struct pic *, int);
static void msix_addroute(struct pic *, struct cpu_info *, int, int, int);
static void msix_delroute(struct pic *, struct cpu_info *, int, int, int);

/*
 * Return a new "devid" for a device attached for the first time.
 * Return the same "devid" for a device re-attached after having been
 * detached.
 * Return -1 if the number of attached MSI/MSI-X devices exceeds
 * NUM_MSI_DEVS.
 */
static int
msipic_allocate_common_msi_devid(const struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int bus, dev, fun, i;

	KASSERT(mutex_owned(&msipic_list_lock));

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	/* If the device was once attached, use the same devid. */
	for (i = 0; i < NUM_MSI_DEVS; i++) {
		/* skip host bridge */
		if (dev_seqs[i].ds_bus == 0
		    && dev_seqs[i].ds_dev == 0
		    && dev_seqs[i].ds_fun == 0)
			break;

		if (dev_seqs[i].ds_bus == bus
		    && dev_seqs[i].ds_dev == dev
		    && dev_seqs[i].ds_fun == fun) {
			dev_seqs[i].ds_using = true;
			return i;
		}
	}

	for (i = 0; i < NUM_MSI_DEVS; i++) {
		if (dev_seqs[i].ds_using == 0) {
			dev_seqs[i].ds_using = true;
			dev_seqs[i].ds_bus = bus;
			dev_seqs[i].ds_dev = dev;
			dev_seqs[i].ds_fun = fun;
			return i;
		}
	}

	DPRINTF(("too many MSI devices.\n"));
	return -1;
}

/*
 * Mark the "devid" unused, but keep it reserved so it can be reused when
 * the device is re-attached.
 */
static void
msipic_release_common_msi_devid(int devid)
{

	KASSERT(mutex_owned(&msipic_list_lock));

	if (devid < 0 || NUM_MSI_DEVS <= devid) {
		DPRINTF(("%s: invalid devid.\n", __func__));
		return;
	}

	dev_seqs[devid].ds_using = false;
	/* Keep ds_* to reuse the same devid for the same device. */
}

static struct pic *
msipic_find_msi_pic_locked(int devid)
{
	struct msipic *mpp;

	KASSERT(mutex_owned(&msipic_list_lock));

	LIST_FOREACH(mpp, &msipic_list, mp_list) {
		if (mpp->mp_devid == devid)
			return mpp->mp_pic;
	}
	return NULL;
}

/*
 * Return the msi_pic whose device is already registered.
 * If the device is not registered yet, return NULL.
 */
struct pic *
msipic_find_msi_pic(int devid)
{
	struct pic *msipic;

	mutex_enter(&msipic_list_lock);
	msipic = msipic_find_msi_pic_locked(devid);
	mutex_exit(&msipic_list_lock);

	return msipic;
}

/*
 * Common construction code for MSI and MSI-X pics.
 */
static struct pic *
msipic_construct_common_msi_pic(const struct pci_attach_args *pa,
    struct pic *pic_tmpl)
{
	struct pic *pic;
	struct msipic *msipic;
	int devid;

	pic = kmem_alloc(sizeof(*pic), KM_SLEEP);
	msipic = kmem_zalloc(sizeof(*msipic), KM_SLEEP);

	mutex_enter(&msipic_list_lock);

	devid = msipic_allocate_common_msi_devid(pa);
	if (devid == -1) {
		mutex_exit(&msipic_list_lock);
		kmem_free(pic, sizeof(*pic));
		kmem_free(msipic, sizeof(*msipic));
		return NULL;
	}

	memcpy(pic, pic_tmpl, sizeof(*pic));
	pic->pic_edge_stubs = x2apic_mode ? x2apic_edge_stubs : ioapic_edge_stubs;
	pic->pic_msipic = msipic;
	msipic->mp_pic = pic;
	pci_decompose_tag(pa->pa_pc, pa->pa_tag,
	    &msipic->mp_bus, &msipic->mp_dev, &msipic->mp_fun);
	memcpy(&msipic->mp_pa, pa, sizeof(msipic->mp_pa));
	msipic->mp_devid = devid;
	/*
	 * pci_msi{,x}_alloc() must be called only once in the device driver.
	 */
	KASSERT(msipic_find_msi_pic_locked(msipic->mp_devid) == NULL);

	LIST_INSERT_HEAD(&msipic_list, msipic, mp_list);

	mutex_exit(&msipic_list_lock);

	return pic;
}

static void
msipic_destruct_common_msi_pic(struct pic *msi_pic)
{
	struct msipic *msipic;

	if (msi_pic == NULL)
		return;

	msipic = msi_pic->pic_msipic;
	mutex_enter(&msipic_list_lock);
	LIST_REMOVE(msipic, mp_list);
	msipic_release_common_msi_devid(msipic->mp_devid);
	mutex_exit(&msipic_list_lock);

	kmem_free(msipic, sizeof(*msipic));
	kmem_free(msi_pic, sizeof(*msi_pic));
}

/*
 * Return true if the pic is an MSI/MSI-X pic.
 */
bool
msipic_is_msi_pic(struct pic *pic)
{

	return (pic->pic_msipic != NULL);
}

/*
 * Return the MSI/MSI-X devid, which is unique to each device.
 */
int
msipic_get_devid(struct pic *pic)
{

	KASSERT(msipic_is_msi_pic(pic));

	return pic->pic_msipic->mp_devid;
}

#define MSI_MSICTL_ENABLE 1
#define MSI_MSICTL_DISABLE 0
static void
msi_set_msictl_enablebit(struct pic *pic, int msi_vec, int flag)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t ctl;
	int off, err __diagused;

	pc = NULL;
	pa = &pic->pic_msipic->mp_pa;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * MSI can establish only one vector at a time, so use the
	 * whole-device enable bit instead of a per-vector mask bit.
	 */
	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (flag == MSI_MSICTL_ENABLE)
		ctl |= PCI_MSI_CTL_MSI_ENABLE;
	else
		ctl &= ~PCI_MSI_CTL_MSI_ENABLE;

	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
msi_hwmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_DISABLE);
}

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be used before pic->hwunmask().
 */
static void
msi_hwunmask(struct pic *pic, int msi_vec)
{

	msi_set_msictl_enablebit(pic, msi_vec, MSI_MSICTL_ENABLE);
}

static void
msi_addroute(struct pic *pic, struct cpu_info *ci,
    int unused, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	pc = NULL;
	pa = &pic->pic_msipic->mp_pa;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL);
	KASSERT(err != 0);

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * "cpuid" for the MSI address is the local APIC ID.  In NetBSD,
	 * the ID is the same as ci->ci_cpuid.
	 */
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* The trigger level is ignored when the trigger mode is edge. */
	data = __SHIFTIN(idt_vec, LAPIC_MSIDATA_VECTOR_MASK)
	    | LAPIC_MSIDATA_TRGMODE_EDGE | LAPIC_MSIDATA_DM_FIXED;
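	/*
	 * For example, assuming local APIC ID 2 and IDT vector 0x60, this
	 * yields addr == 0xfee02000 and data == 0x00000060 (edge trigger,
	 * fixed delivery mode).
	 */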

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI, 0);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, data);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR, addr);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, data);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be used before pic->hwunmask().
 */
static void
msi_delroute(struct pic *pic, struct cpu_info *ci,
    int msi_vec, int idt_vec, int type)
{

	msi_hwmask(pic, msi_vec);
}

/*
 * Template for MSI pics.
 * .pic_msipic is set later, in msipic_construct_common_msi_pic().
 */
static struct pic msi_pic_tmpl = {
	.pic_type = PIC_MSI,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED,	/* not used for msi_pic */
	.pic_hwmask = msi_hwmask,
	.pic_hwunmask = msi_hwunmask,
	.pic_addroute = msi_addroute,
	.pic_delroute = msi_delroute,
};

/*
 * Create a pseudo pic for an MSI device.
 */
struct pic *
msipic_construct_msi_pic(const struct pci_attach_args *pa)
{
	struct pic *msi_pic;
	char pic_name_buf[MSIPICNAMEBUF];

	msi_pic = msipic_construct_common_msi_pic(pa, &msi_pic_tmpl);
	if (msi_pic == NULL) {
		DPRINTF(("cannot allocate MSI pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msi%d",
	    msi_pic->pic_msipic->mp_devid);
	strncpy(msi_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msi_pic->pic_name = msi_pic->pic_msipic->mp_pic_name;

	return msi_pic;
}
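
/*
 * Illustrative sketch only: drivers do not call msipic_construct_msi_pic()
 * directly.  A typical MSI consumer goes through the MD interrupt API,
 * roughly as follows (xxx_intr, sc, and self are hypothetical driver names):
 *
 *	pci_intr_handle_t *ihs;
 *
 *	if (pci_msi_alloc_exact(pa, &ihs, 1) == 0)
 *		sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, ihs[0],
 *		    IPL_NET, xxx_intr, sc, device_xname(self));
 */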

/*
 * Delete the pseudo pic for an MSI device.
 */
void
msipic_destruct_msi_pic(struct pic *msi_pic)
{

	msipic_destruct_common_msi_pic(msi_pic);
}

#define MSIX_VECCTL_HWMASK 1
#define MSIX_VECCTL_HWUNMASK 0
static void
msix_set_vecctl_mask(struct pic *pic, int msix_vec, int flag)
{
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	uint32_t vecctl;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	vecctl = bus_space_read_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	if (flag == MSIX_VECCTL_HWMASK)
		vecctl |= PCI_MSIX_VECTCTL_MASK;
	else
		vecctl &= ~PCI_MSIX_VECTCTL_MASK;

	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, vecctl);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);
}

static void
msix_hwmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWMASK);
}

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be used before pic->hwunmask().
 */
static void
msix_hwunmask(struct pic *pic, int msix_vec)
{

	msix_set_vecctl_mask(pic, msix_vec, MSIX_VECCTL_HWUNMASK);
}

static void
msix_addroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int idt_vec, int type)
{
	pci_chipset_tag_t pc;
	struct pci_attach_args *pa;
	pcitag_t tag;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	uint64_t entry_base;
	pcireg_t addr, data, ctl;
	int off, err __diagused;

	if (msix_vec < 0) {
		DPRINTF(("%s: invalid MSI-X table index, devid=%d vecid=%d",
		    __func__, msipic_get_devid(pic), msix_vec));
		return;
	}

	pa = &pic->pic_msipic->mp_pa;
	pc = pa->pa_pc;
	tag = pa->pa_tag;
	err = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
	KASSERT(err != 0);

	entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;

	/*
	 * See Intel 64 and IA-32 Architectures Software Developer's Manual
	 * Volume 3 10.11 Message Signalled Interrupts.
	 */
	/*
	 * "cpuid" for the MSI-X address is the local APIC ID.  In NetBSD,
	 * the ID is the same as ci->ci_cpuid.
	 */
	addr = LAPIC_MSIADDR_BASE | __SHIFTIN(ci->ci_cpuid,
	    LAPIC_MSIADDR_DSTID_MASK);
	/* The trigger level is ignored when the trigger mode is edge. */
	data = __SHIFTIN(idt_vec, LAPIC_MSIDATA_VECTOR_MASK)
	    | LAPIC_MSIDATA_TRGMODE_EDGE | LAPIC_MSIDATA_DM_FIXED;

	bstag = pic->pic_msipic->mp_bstag;
	bshandle = pic->pic_msipic->mp_bshandle;
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO, addr);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI, 0);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_DATA, data);
	bus_space_write_4(bstag, bshandle,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, 0);
	BUS_SPACE_WRITE_FLUSH(bstag, bshandle);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

/*
 * Do not use pic->hwunmask() immediately after pic->delroute();
 * pic->addroute() must be used before pic->hwunmask().
 */
static void
msix_delroute(struct pic *pic, struct cpu_info *ci,
    int msix_vec, int vec, int type)
{

	msix_hwmask(pic, msix_vec);
}

/*
 * Template for MSI-X pics.
 * .pic_msipic is set later, in msipic_construct_common_msi_pic().
 */
static struct pic msix_pic_tmpl = {
	.pic_type = PIC_MSIX,
	.pic_vecbase = 0,
	.pic_apicid = 0,
	.pic_lock = __SIMPLELOCK_UNLOCKED,	/* not used for msix_pic */
	.pic_hwmask = msix_hwmask,
	.pic_hwunmask = msix_hwunmask,
	.pic_addroute = msix_addroute,
	.pic_delroute = msix_delroute,
};

struct pic *
msipic_construct_msix_pic(const struct pci_attach_args *pa)
{
	struct pic *msix_pic;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	pcireg_t tbl;
	bus_space_tag_t bstag;
	bus_space_handle_t bshandle;
	bus_size_t bssize;
	size_t table_size;
	uint32_t table_offset;
	u_int memtype;
	bus_addr_t memaddr;
	int flags;
	int bir, bar, err, off, table_nentry;
	char pic_name_buf[MSIPICNAMEBUF];

	table_nentry = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (table_nentry == 0) {
		DPRINTF(("the MSI-X table has no entries.\n"));
		return NULL;
	}

	pc = pa->pa_pc;
	tag = pa->pa_tag;
	if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL) == 0) {
		DPRINTF(("%s: no MSI-X capability\n", __func__));
		return NULL;
	}

	msix_pic = msipic_construct_common_msi_pic(pa, &msix_pic_tmpl);
	if (msix_pic == NULL) {
		DPRINTF(("cannot allocate MSI-X pic.\n"));
		return NULL;
	}

	memset(pic_name_buf, 0, MSIPICNAMEBUF);
	snprintf(pic_name_buf, MSIPICNAMEBUF, "msix%d",
	    msix_pic->pic_msipic->mp_devid);
	strncpy(msix_pic->pic_msipic->mp_pic_name, pic_name_buf,
	    MSIPICNAMEBUF - 1);
	msix_pic->pic_name = msix_pic->pic_msipic->mp_pic_name;

	tbl = pci_conf_read(pc, tag, off + PCI_MSIX_TBLOFFSET);
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	bir = tbl & PCI_MSIX_TBLBIR_MASK;
	switch (bir) {
	case 0:
		bar = PCI_BAR0;
		break;
	case 1:
		bar = PCI_BAR1;
		break;
	case 2:
		bar = PCI_BAR2;
		break;
	case 3:
		bar = PCI_BAR3;
		break;
	case 4:
		bar = PCI_BAR4;
		break;
	case 5:
		bar = PCI_BAR5;
		break;
	default:
		aprint_error("invalid device: the MSI-X table uses a reserved BIR value.\n");
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	memtype = pci_mapreg_type(pc, tag, bar);
	/*
	 * Each PCI_MSIX_TABLE_ENTRY_SIZE entry consists of:
	 *     - Vector Control (32bit)
	 *     - Message Data (32bit)
	 *     - Message Upper Address (32bit)
	 *     - Message Lower Address (32bit)
	 */
	table_size = table_nentry * PCI_MSIX_TABLE_ENTRY_SIZE;
#if 0
	err = pci_mapreg_submap(pa, bar, memtype, BUS_SPACE_MAP_LINEAR,
	    roundup(table_size, PAGE_SIZE), table_offset,
	    &bstag, &bshandle, NULL, &bssize);
#else
	/*
	 * Workaround for the PCI prefetchable bit.  Some chips (e.g. Intel
	 * 82599) report SERR and MSI-X does not work if the MSI-X table is
	 * mapped prefetchable.  This may be a bug in our PCI common code or
	 * in VMs rather than in the drivers.  Until the real cause is found,
	 * ignore the prefetchable bit.
	 */
	if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, bar, memtype,
	    &memaddr, NULL, &flags) != 0) {
		DPRINTF(("cannot get map info.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
		DPRINTF(("clear prefetchable bit\n"));
		flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
	}
	bssize = roundup(table_size, PAGE_SIZE);
	err = bus_space_map(pa->pa_memt, memaddr + table_offset, bssize, flags,
	    &bshandle);
	bstag = pa->pa_memt;
#endif
	if (err) {
		DPRINTF(("cannot map the MSI-X table.\n"));
		msipic_destruct_common_msi_pic(msix_pic);
		return NULL;
	}
	msix_pic->pic_msipic->mp_bstag = bstag;
	msix_pic->pic_msipic->mp_bshandle = bshandle;
	msix_pic->pic_msipic->mp_bssize = bssize;

	return msix_pic;
}
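
/*
 * Illustrative sketch only: as with MSI, drivers reach this code through
 * the MD API, e.g. pci_msix_alloc_exact(pa, &ihs, count) followed by
 * pci_intr_establish_xname() on each returned handle.
 */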

/*
 * Delete the pseudo pic for an MSI-X device.
 */
void
msipic_destruct_msix_pic(struct pic *msix_pic)
{
	struct msipic *msipic;

	KASSERT(msipic_is_msi_pic(msix_pic));
	KASSERT(msix_pic->pic_type == PIC_MSIX);

	msipic = msix_pic->pic_msipic;
	bus_space_unmap(msipic->mp_bstag, msipic->mp_bshandle,
	    msipic->mp_bssize);

	msipic_destruct_common_msi_pic(msix_pic);
}

/*
 * Set the number of MSI vectors for the pseudo MSI pic.
 */
int
msipic_set_msi_vectors(struct pic *msi_pic, pci_intr_handle_t *pihs,
    int count)
{

	KASSERT(msipic_is_msi_pic(msi_pic));

	msi_pic->pic_msipic->mp_veccnt = count;
	return 0;
}

/*
 * Initialize the system to use MSI/MSI-X.
 */
void
msipic_init(void)
{

	mutex_init(&msipic_list_lock, MUTEX_DEFAULT, IPL_NONE);
}