/*	$NetBSD: pci_machdep.c,v 1.51 2011/09/13 17:58:42 dyoung Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-specific functions for PCI autoconfiguration.
 *
 * On PCs, there are two methods of generating PCI configuration cycles.
 * We try to detect the appropriate mechanism for this machine and set
 * up a few function pointers to access the correct method directly.
 *
 * The configuration method can be hard-coded in the config file by
 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode
 * as defined in section 3.6.4.1, `Generating Configuration Cycles'.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.51 2011/09/13 17:58:42 dyoung Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/bus_private.h>

#include <machine/pio.h>
#include <machine/lock.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pccbbreg.h>
#include <dev/pci/pcidevs.h>

#include "acpica.h"
#include "opt_mpbios.h"
#include "opt_acpi.h"

#ifdef MPBIOS
#include <machine/mpbiosvar.h>
#endif

#if NACPICA > 0
#include <machine/mpacpi.h>
#endif

#include <machine/mpconfig.h>

#include "opt_pci_conf_mode.h"

#ifdef __i386__
#include "opt_xbox.h"
#ifdef XBOX
#include <machine/xbox.h>
#endif
#endif

#ifdef PCI_CONF_MODE
#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2)
static int pci_mode = PCI_CONF_MODE;
#else
#error Invalid PCI configuration mode.
#endif
#else
static int pci_mode = -1;
#endif

struct pci_conf_lock {
	uint32_t cl_cpuno;	/* 0: unlocked
				 * 1 + n: locked by CPU n (0 <= n)
				 */
	uint32_t cl_sel;	/* the address that's being read. */
};

static void pci_conf_unlock(struct pci_conf_lock *);
static uint32_t pci_conf_selector(pcitag_t, int);
static unsigned int pci_conf_port(pcitag_t, int);
static void pci_conf_select(uint32_t);
static void pci_conf_lock(struct pci_conf_lock *, uint32_t);
static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *);
struct pci_bridge_hook_arg {
	void (*func)(pci_chipset_tag_t, pcitag_t, void *);
	void *arg;
};

#define	PCI_MODE1_ENABLE	0x80000000UL
#define	PCI_MODE1_ADDRESS_REG	0x0cf8
#define	PCI_MODE1_DATA_REG	0x0cfc

#define	PCI_MODE2_ENABLE_REG	0x0cf8
#define	PCI_MODE2_FORWARD_REG	0x0cfa

#define _m1tag(b, d, f) \
	(PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8))
#define _qe(bus, dev, fcn, vend, prod) \
	{_m1tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)}
struct {
	uint32_t tag;
	pcireg_t id;
} pcim1_quirk_tbl[] = {
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1),
	/* XXX Triflex2 not tested */
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2),
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4),
	/* Triton needed for Connectix Virtual PC */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
	/* Connectix Virtual PC 5 has a 440BX */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
	/* Parallels Desktop for Mac */
	_qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO),
	_qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS),
	/* SIS 740 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740),
	/* SIS 741 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741),
	{0, 0xffffffff} /* patchable */
};
#undef _m1tag
#undef _id
#undef _qe

/*
 * PCI doesn't have any special needs; just use the generic versions
 * of these functions.
 */
struct x86_bus_dma_tag pci_bus_dma_tag = {
	._tag_needs_free	= 0,
#if defined(_LP64) || defined(PAE)
	._bounce_thresh		= PCI32_DMA_BOUNCE_THRESHOLD,
	._bounce_alloc_lo	= ISA_DMA_BOUNCE_THRESHOLD,
	._bounce_alloc_hi	= PCI32_DMA_BOUNCE_THRESHOLD,
#else
	._bounce_thresh		= 0,
	._bounce_alloc_lo	= 0,
	._bounce_alloc_hi	= 0,
#endif
	._may_bounce		= NULL,
};

#ifdef _LP64
struct x86_bus_dma_tag pci_bus_dma64_tag = {
	._tag_needs_free	= 0,
	._bounce_thresh		= 0,
	._bounce_alloc_lo	= 0,
	._bounce_alloc_hi	= 0,
	._may_bounce		= NULL,
};
#endif

static struct pci_conf_lock cl0 = {
	  .cl_cpuno = 0UL
	, .cl_sel = 0UL
};

static struct pci_conf_lock * const cl = &cl0;

static void
pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel)
{
	uint32_t cpuno;

	KASSERT(sel != 0);

	kpreempt_disable();
	cpuno = cpu_number() + 1;
	/* If the kernel enters pci_conf_lock() through an interrupt
	 * handler, then the CPU may already hold the lock.
	 *
	 * If the CPU does not already hold the lock, spin until
	 * we can acquire it.
	 */
	if (cpuno == cl->cl_cpuno) {
		ocl->cl_cpuno = cpuno;
	} else {
		u_int spins;

		ocl->cl_cpuno = 0;

		spins = SPINLOCK_BACKOFF_MIN;
		while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) {
			SPINLOCK_BACKOFF(spins);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins)) {
				panic("%s: cpu %" PRId32
				    " spun out waiting for cpu %" PRId32,
				    __func__, cpuno, cl->cl_cpuno);
			}
#endif	/* LOCKDEBUG */
		}
	}

	/* Only one CPU can be here, so an interlocked atomic_swap(3)
	 * is not necessary.
	 *
	 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel,
	 * and applying atomic_cas_32_ni() is not an atomic operation,
	 * however, any interrupt that, in the middle of the
	 * operation, modifies cl->cl_sel, will also restore
	 * cl->cl_sel.  So cl->cl_sel will have the same value when
	 * we apply atomic_cas_32_ni() as when we evaluated it,
	 * before.
	 */
	ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel);
	pci_conf_select(sel);
}

static void
pci_conf_unlock(struct pci_conf_lock *ocl)
{
	uint32_t sel;

	sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel);
	pci_conf_select(ocl->cl_sel);
	if (ocl->cl_cpuno != cl->cl_cpuno)
		atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno);
	kpreempt_enable();
}

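/*
 * Compute the value that selects the given register of the device named
 * by `tag': in mode 1, the word written to the address register at 0xcf8;
 * in mode 2, the enable/forward bytes extracted from the tag.
 */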
static uint32_t
pci_conf_selector(pcitag_t tag, int reg)
{
	static const pcitag_t mode2_mask = {
		.mode2 = {
			  .enable = 0xff
			, .forward = 0xff
		}
	};

	switch (pci_mode) {
	case 1:
		return tag.mode1 | reg;
	case 2:
		return tag.mode1 & mode2_mask.mode1;
	default:
		panic("%s: mode not configured", __func__);
	}
}

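/*
 * Return the I/O port through which configuration data is transferred
 * for the selected device: the fixed data register at 0xcfc in mode 1,
 * or the per-device port derived from the tag in mode 2.
 */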
static unsigned int
pci_conf_port(pcitag_t tag, int reg)
{
	switch (pci_mode) {
	case 1:
		return PCI_MODE1_DATA_REG;
	case 2:
		return tag.mode2.port | reg;
	default:
		panic("%s: mode not configured", __func__);
	}
}

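/*
 * Program the configuration address/enable registers with a selector
 * previously built by pci_conf_selector().
 */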
static void
pci_conf_select(uint32_t sel)
{
	pcitag_t tag;

	switch (pci_mode) {
	case 1:
		outl(PCI_MODE1_ADDRESS_REG, sel);
		return;
	case 2:
		tag.mode1 = sel;
		outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable);
		if (tag.mode2.enable != 0)
			outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward);
		return;
	default:
		panic("%s: mode not configured", __func__);
	}
}

void
pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
{

	if (pba->pba_bus == 0)
		aprint_normal(": configuration mode %d", pci_mode);
#ifdef MPBIOS
	mpbios_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0
	mpacpi_pci_attach_hook(parent, self, pba);
#endif
}

int
pci_bus_maxdevs(pci_chipset_tag_t pc, int busno)
{

#if defined(__i386__) && defined(XBOX)
	/*
	 * Scanning above the first device is fatal on the Microsoft Xbox.
	 * If busno=1, only allow for one device.
	 */
	if (arch_i386_is_xbox) {
		if (busno == 1)
			return 1;
		else if (busno > 1)
			return 0;
	}
#endif

	/*
	 * Bus number is irrelevant.  If Configuration Mechanism 2 is in
	 * use, can only have devices 0-15 on any bus.  If Configuration
	 * Mechanism 1 is in use, can have devices 0-31 (i.e. the `normal'
	 * range).
	 */
	if (pci_mode == 2)
		return (16);
	else
		return (32);
}

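/*
 * Encode a bus/device/function triple into a tag in the layout expected
 * by the configuration mechanism in use, honoring any chipset override.
 */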
pcitag_t
pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function)
{
	pci_chipset_tag_t ipc;
	pcitag_t tag;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0)
			continue;
		return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx,
		    pc, bus, device, function);
	}

	switch (pci_mode) {
	case 1:
		if (bus >= 256 || device >= 32 || function >= 8)
			panic("%s: bad request", __func__);

		tag.mode1 = PCI_MODE1_ENABLE |
			(bus << 16) | (device << 11) | (function << 8);
		return tag;
	case 2:
		if (bus >= 256 || device >= 16 || function >= 8)
			panic("%s: bad request", __func__);

		tag.mode2.port = 0xc000 | (device << 8);
		tag.mode2.enable = 0xf0 | (function << 1);
		tag.mode2.forward = bus;
		return tag;
	default:
		panic("%s: mode not configured", __func__);
	}
}

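/*
 * Split a tag back into its bus, device and function numbers; a NULL
 * pointer skips the corresponding component.
 */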
void
pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag,
    int *bp, int *dp, int *fp)
{
	pci_chipset_tag_t ipc;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0)
			continue;
		(*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx,
		    pc, tag, bp, dp, fp);
		return;
	}

	switch (pci_mode) {
	case 1:
		if (bp != NULL)
			*bp = (tag.mode1 >> 16) & 0xff;
		if (dp != NULL)
			*dp = (tag.mode1 >> 11) & 0x1f;
		if (fp != NULL)
			*fp = (tag.mode1 >> 8) & 0x7;
		return;
	case 2:
		if (bp != NULL)
			*bp = tag.mode2.forward & 0xff;
		if (dp != NULL)
			*dp = (tag.mode2.port >> 8) & 0xf;
		if (fp != NULL)
			*fp = (tag.mode2.enable >> 1) & 0x7;
		return;
	default:
		panic("%s: mode not configured", __func__);
	}
}

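/*
 * Read a 32-bit configuration register, either through a chipset
 * override or directly via the selected configuration mechanism,
 * holding the low-level configuration lock around the port accesses.
 */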
pcireg_t
pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	pci_chipset_tag_t ipc;
	pcireg_t data;
	struct pci_conf_lock ocl;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0)
			continue;
		return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg);
	}

#if defined(__i386__) && defined(XBOX)
	if (arch_i386_is_xbox) {
		int bus, dev, fn;
		pci_decompose_tag(pc, tag, &bus, &dev, &fn);
		if (bus == 0 && dev == 0 && (fn == 1 || fn == 2))
			return (pcireg_t)-1;
	}
#endif

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	data = inl(pci_conf_port(tag, reg));
	pci_conf_unlock(&ocl);
	return data;
}

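/*
 * Write a 32-bit configuration register; the same override and locking
 * rules as pci_conf_read() apply.
 */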
void
pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		(*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg,
		    data);
		return;
	}

#if defined(__i386__) && defined(XBOX)
	if (arch_i386_is_xbox) {
		int bus, dev, fn;
		pci_decompose_tag(pc, tag, &bus, &dev, &fn);
		if (bus == 0 && dev == 0 && (fn == 1 || fn == 2))
			return;
	}
#endif

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	outl(pci_conf_port(tag, reg), data);
	pci_conf_unlock(&ocl);
}

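/*
 * Set the configuration mechanism explicitly.  The new mode must match
 * any mode that has already been set or detected.
 */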
void
pci_mode_set(int mode)
{
	KASSERT(pci_mode == -1 || pci_mode == mode);

	pci_mode = mode;
}

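/*
 * Determine which configuration mechanism this machine uses: check the
 * quirk table for chipsets known to require mode 1, then probe for a
 * compliant mode 1 implementation, and fall back to a weak mode 2 test.
 * The result is cached in pci_mode.
 */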
int
pci_mode_detect(void)
{
	uint32_t sav, val;
	int i;
	pcireg_t idreg;

	if (pci_mode != -1)
		return pci_mode;

	/*
	 * We try to divine which configuration mode the host bridge wants.
	 */

	sav = inl(PCI_MODE1_ADDRESS_REG);

	pci_mode = 1; /* assume this for now */
	/*
	 * catch some known buggy implementations of mode 1
	 */
	for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
		pcitag_t t;

		if (!pcim1_quirk_tbl[i].tag)
			break;
		t.mode1 = pcim1_quirk_tbl[i].tag;
		idreg = pci_conf_read(0, t, PCI_ID_REG); /* needs "pci_mode" */
		if (idreg == pcim1_quirk_tbl[i].id) {
#ifdef DEBUG
			printf("known mode 1 PCI chipset (%08x)\n",
			    idreg);
#endif
			return (pci_mode);
		}
	}

	/*
	 * Strong check for standard compliant mode 1:
	 * 1. bit 31 ("enable") can be set
	 * 2. byte/word access does not affect register
	 */
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
	outb(PCI_MODE1_ADDRESS_REG + 3, 0);
	outw(PCI_MODE1_ADDRESS_REG + 2, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
#ifdef DEBUG
		printf("pci_mode_detect: mode 1 enable failed (%x)\n",
		    val);
#endif
		goto not1;
	}
	outl(PCI_MODE1_ADDRESS_REG, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != 0)
		goto not1;
	return (pci_mode);
not1:
	outl(PCI_MODE1_ADDRESS_REG, sav);

	/*
	 * This mode 2 check is quite weak (and known to give false
	 * positives on some Compaq machines).
	 * However, this doesn't matter, because this is the
	 * last test, and simply no PCI devices will be found if
	 * this happens.
	 */
	outb(PCI_MODE2_ENABLE_REG, 0);
	outb(PCI_MODE2_FORWARD_REG, 0);
	if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
	    inb(PCI_MODE2_FORWARD_REG) != 0)
		goto not2;
	return (pci_mode = 2);
not2:

	return (pci_mode = 0);
}

/*
 * Determine which flags should be passed to the primary PCI bus's
 * autoconfiguration node.  We use this to detect broken chipsets
 * which cannot safely use memory-mapped device access.
 */
int
pci_bus_flags(void)
{
	int rval = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY |
	    PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY | PCI_FLAGS_MWI_OKAY;
	int device, maxndevs;
	pcitag_t tag;
	pcireg_t id;

	maxndevs = pci_bus_maxdevs(NULL, 0);

	for (device = 0; device < maxndevs; device++) {
		tag = pci_make_tag(NULL, 0, device, 0);
		id = pci_conf_read(NULL, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		switch (PCI_VENDOR(id)) {
		case PCI_VENDOR_SIS:
			switch (PCI_PRODUCT(id)) {
			case PCI_PRODUCT_SIS_85C496:
				goto disable_mem;
			}
			break;
		}
	}

	return (rval);

disable_mem:
	printf("Warning: broken PCI-Host bridge detected; "
	    "disabling memory-mapped access\n");
	rval &= ~(PCI_FLAGS_MEM_OKAY|PCI_FLAGS_MRL_OKAY|PCI_FLAGS_MRM_OKAY|
	    PCI_FLAGS_MWI_OKAY);
	return (rval);
}

void
pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	pci_device_foreach_min(pc, 0, maxbus, func, context);
}

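/*
 * Apply `func' to every function of every device present on buses
 * minbus through maxbus, probing extra functions only for devices that
 * advertise (or are quirked as) multi-function.
 */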
void
pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	const struct pci_quirkdata *qd;
	int bus, device, function, maxdevs, nfuncs;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (bus = minbus; bus <= maxbus; bus++) {
		maxdevs = pci_bus_maxdevs(pc, bus);
		for (device = 0; device < maxdevs; device++) {
			tag = pci_make_tag(pc, bus, device, 0);
			id = pci_conf_read(pc, tag, PCI_ID_REG);

			/* Invalid vendor ID value? */
			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
				continue;
			/* XXX Not invalid, but we've done this ~forever. */
			if (PCI_VENDOR(id) == 0)
				continue;

			qd = pci_lookup_quirkdata(PCI_VENDOR(id),
			    PCI_PRODUCT(id));

			bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
			    (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
				nfuncs = 8;
			else
				nfuncs = 1;

			for (function = 0; function < nfuncs; function++) {
				tag = pci_make_tag(pc, bus, device, function);
				id = pci_conf_read(pc, tag, PCI_ID_REG);

				/* Invalid vendor ID value? */
				if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
					continue;
				/*
				 * XXX Not invalid, but we've done this
				 * ~forever.
				 */
				if (PCI_VENDOR(id) == 0)
					continue;
				(*func)(pc, tag, context);
			}
		}
	}
}

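/*
 * Apply `func' only to PCI-PCI and CardBus bridges found on buses
 * minbus through maxbus.
 */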
void
pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
{
	struct pci_bridge_hook_arg bridge_hook;

	bridge_hook.func = func;
	bridge_hook.arg = ctx;

	pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
	    &bridge_hook);
}

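/*
 * Per-device helper for pci_bridge_foreach(): invoke the caller's
 * function only if the device's class register identifies it as a
 * PCI-PCI or CardBus bridge.
 */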
static void
pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
	pcireg_t reg;

	reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
	if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
	    (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
	     PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
		(*bridge_hook->func)(pc, tag, bridge_hook->arg);
	}
}

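/*
 * Map a single PCI_OVERRIDE_* bit to the corresponding function pointer
 * in the overrides structure, or NULL if the bit is unknown.
 */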
static const void *
bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
{
	switch (bit) {
	case PCI_OVERRIDE_CONF_READ:
		return ov->ov_conf_read;
	case PCI_OVERRIDE_CONF_WRITE:
		return ov->ov_conf_write;
	case PCI_OVERRIDE_INTR_MAP:
		return ov->ov_intr_map;
	case PCI_OVERRIDE_INTR_STRING:
		return ov->ov_intr_string;
	case PCI_OVERRIDE_INTR_EVCNT:
		return ov->ov_intr_evcnt;
	case PCI_OVERRIDE_INTR_ESTABLISH:
		return ov->ov_intr_establish;
	case PCI_OVERRIDE_INTR_DISESTABLISH:
		return ov->ov_intr_disestablish;
	case PCI_OVERRIDE_MAKE_TAG:
		return ov->ov_make_tag;
	case PCI_OVERRIDE_DECOMPOSE_TAG:
		return ov->ov_decompose_tag;
	default:
		return NULL;
	}
}

void
pci_chipset_tag_destroy(pci_chipset_tag_t pc)
{
	kmem_free(pc, sizeof(struct pci_chipset_tag));
}

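/*
 * Create a chipset tag that layers the override functions named by
 * `present' on top of `opc'.  Fails with EINVAL if any requested
 * override bit lacks a function pointer in `ov'.
 */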
int
pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
    const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
{
	uint64_t bit, bits, nbits;
	pci_chipset_tag_t pc;
	const void *fp;

	if (ov == NULL || present == 0)
		return EINVAL;

	pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);

	if (pc == NULL)
		return ENOMEM;

	pc->pc_super = opc;

	for (bits = present; bits != 0; bits = nbits) {
		nbits = bits & (bits - 1);
		bit = nbits ^ bits;
		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
#ifdef DEBUG
			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
#endif
			goto einval;
		}
	}

	pc->pc_ov = ov;
	pc->pc_present = present;
	pc->pc_ctx = ctx;

	*pcp = pc;

	return 0;
einval:
	kmem_free(pc, sizeof(struct pci_chipset_tag));
	return EINVAL;
}