/* $NetBSD: pci_machdep.c,v 1.81 2018/06/23 16:05:05 jakllsch Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by Charles M. Hannum.
48 * 4. The name of the author may not be used to endorse or promote products
49 * derived from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 */
62
63 /*
64 * Machine-specific functions for PCI autoconfiguration.
65 *
66 * On PCs, there are two methods of generating PCI configuration cycles.
67 * We try to detect the appropriate mechanism for this machine and set
68 * up a few function pointers to access the correct method directly.
69 *
70 * The configuration method can be hard-coded in the config file by
71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode
72 * as defined in section 3.6.4.1, `Generating Configuration Cycles'.
73 */
74
75 #include <sys/cdefs.h>
76 __KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.81 2018/06/23 16:05:05 jakllsch Exp $");
77
78 #include <sys/types.h>
79 #include <sys/param.h>
80 #include <sys/time.h>
81 #include <sys/systm.h>
82 #include <sys/errno.h>
83 #include <sys/device.h>
84 #include <sys/bus.h>
85 #include <sys/cpu.h>
86 #include <sys/kmem.h>
87
88 #include <uvm/uvm_extern.h>
89
90 #include <machine/bus_private.h>
91
92 #include <machine/pio.h>
93 #include <machine/lock.h>
94
95 #include <dev/isa/isareg.h>
96 #include <dev/isa/isavar.h>
97 #include <dev/pci/pcivar.h>
98 #include <dev/pci/pcireg.h>
99 #include <dev/pci/pccbbreg.h>
100 #include <dev/pci/pcidevs.h>
101 #include <dev/pci/ppbvar.h>
102 #include <dev/pci/genfb_pcivar.h>
103
104 #include <dev/wsfb/genfbvar.h>
105 #include <arch/x86/include/genfb_machdep.h>
106 #include <dev/ic/vgareg.h>
107
108 #include "acpica.h"
109 #include "genfb.h"
110 #include "isa.h"
111 #include "opt_acpi.h"
112 #include "opt_ddb.h"
113 #include "opt_mpbios.h"
114 #include "opt_puc.h"
115 #include "opt_vga.h"
116 #include "pci.h"
117 #include "wsdisplay.h"
118 #include "com.h"
119
120 #ifdef DDB
121 #include <machine/db_machdep.h>
122 #include <ddb/db_sym.h>
123 #include <ddb/db_extern.h>
124 #endif
125
126 #ifdef VGA_POST
127 #include <x86/vga_post.h>
128 #endif
129
130 #include <x86/cpuvar.h>
131
132 #include <machine/autoconf.h>
133 #include <machine/bootinfo.h>
134
135 #ifdef MPBIOS
136 #include <machine/mpbiosvar.h>
137 #endif
138
139 #if NACPICA > 0
140 #include <machine/mpacpi.h>
141 #if !defined(NO_PCI_EXTENDED_CONFIG)
142 #include <dev/acpi/acpivar.h>
143 #include <dev/acpi/acpi_mcfg.h>
144 #endif
145 #endif
146
147 #include <machine/mpconfig.h>
148
149 #if NCOM > 0
150 #include <dev/pci/puccn.h>
151 #endif
152
153 #ifndef XEN
154 #include <x86/efi.h>
155 #endif
156
157 #include "opt_pci_conf_mode.h"
158
#ifdef PCI_CONF_MODE
#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2)
/* Configuration mechanism fixed at compile time via `options PCI_CONF_MODE'. */
static int pci_mode = PCI_CONF_MODE;
#else
#error Invalid PCI configuration mode.
#endif
#else
/* -1: mechanism not yet determined; set by pci_mode_detect()/pci_mode_set(). */
static int pci_mode = -1;
#endif
168
/*
 * Saved state for the global PCI configuration-space lock.  The lock
 * serializes access to the shared address/data register pair and is
 * recursive per-CPU so that interrupt handlers may nest inside an
 * in-progress configuration cycle on the same CPU.
 */
struct pci_conf_lock {
        uint32_t cl_cpuno;      /* 0: unlocked
                                 * 1 + n: locked by CPU n (0 <= n)
                                 */
        uint32_t cl_sel;        /* the address that's being read. */
};

static void pci_conf_unlock(struct pci_conf_lock *);
static uint32_t pci_conf_selector(pcitag_t, int);
static unsigned int pci_conf_port(pcitag_t, int);
static void pci_conf_select(uint32_t);
static void pci_conf_lock(struct pci_conf_lock *, uint32_t);
static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *);
/* Closure carried through pci_device_foreach_min() to pci_bridge_hook(). */
struct pci_bridge_hook_arg {
        void (*func)(pci_chipset_tag_t, pcitag_t, void *);
        void *arg;
};
186
/* Configuration mechanism 1: 32-bit address and data registers. */
#define PCI_MODE1_ENABLE        0x80000000UL
#define PCI_MODE1_ADDRESS_REG   0x0cf8
#define PCI_MODE1_DATA_REG      0x0cfc

/* Configuration mechanism 2: byte-wide enable and bus-forward registers. */
#define PCI_MODE2_ENABLE_REG    0x0cf8
#define PCI_MODE2_FORWARD_REG   0x0cfa

/* Build a mode-1 selector for bus/device/function (see pci_make_tag()). */
#define _tag(b, d, f) \
        {.mode1 = PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)}
#define _qe(bus, dev, fcn, vend, prod) \
        {_tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)}
/*
 * Host bridges known to require configuration mechanism 1 even though
 * the generic mode-1 probe in pci_mode_detect() would fail on them.
 * Consulted before the probe; a matching ID forces mode 1.
 */
const struct {
        pcitag_t tag;
        pcireg_t id;
} pcim1_quirk_tbl[] = {
        _qe(0, 0, 0, PCI_VENDOR_INVALID, 0x0000),       /* patchable */
        _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1),
        /* XXX Triflex2 not tested */
        _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2),
        _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4),
        /* Triton needed for Connectix Virtual PC */
        _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
        /* Connectix Virtual PC 5 has a 440BX */
        _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
        /* Parallels Desktop for Mac */
        _qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO),
        _qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS),
        /* SIS 740 */
        _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740),
        /* SIS 741 */
        _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741),
        /* VIA Technologies VX900 */
        _qe(0, 0, 0, PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VX900_HB)
};
#undef _tag
#undef _qe
223
/* arch/xen does not support MSI/MSI-X yet. */
#ifdef __HAVE_PCI_MSI_MSIX
#define PCI_QUIRK_DISABLE_MSI   1 /* Neither MSI nor MSI-X work */
#define PCI_QUIRK_DISABLE_MSIX  2 /* MSI-X does not work */
#define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI and MSI-X works */

#define _dme(vend, prod) \
        { PCI_QUIRK_DISABLE_MSI, PCI_ID_CODE(vend, prod) }
#define _dmxe(vend, prod) \
        { PCI_QUIRK_DISABLE_MSIX, PCI_ID_CODE(vend, prod) }
#define _emve(vend, prod) \
        { PCI_QUIRK_ENABLE_MSI_VM, PCI_ID_CODE(vend, prod) }
/*
 * Host-bridge IDs with known MSI/MSI-X defects (or, for _emve entries,
 * old chipsets emulated by hypervisors where MSI does work after all).
 * Matched against bus 0 dev 0 func 0 in pci_attach_hook().
 */
const struct {
        int type;
        pcireg_t id;
} pci_msi_quirk_tbl[] = {
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCMC),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437MX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437VX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439HX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439TX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_AGP),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82440MX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_AGP),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_NOAGP),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX_AGP),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82820_MCH),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1),
        _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB),
        _dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_PCHB),
        _dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_PCHB),
        _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC751_SC),
        _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC761_SC),
        _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC762_NB),

        _emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),     /* QEMU */
        _emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),     /* VMWare */
};
#undef _dme
#undef _dmxe
#undef _emve
#endif /* __HAVE_PCI_MSI_MSIX */
275
276 /*
277 * PCI doesn't have any special needs; just use the generic versions
278 * of these functions.
279 */
/*
 * PCI doesn't have any special needs; just use the generic versions
 * of these functions.  On _LP64/PAE kernels, bounce 32-bit PCI DMA
 * that would land above the 4 GB boundary.
 */
struct x86_bus_dma_tag pci_bus_dma_tag = {
        ._tag_needs_free        = 0,
#if defined(_LP64) || defined(PAE)
        ._bounce_thresh         = PCI32_DMA_BOUNCE_THRESHOLD,
        ._bounce_alloc_lo       = ISA_DMA_BOUNCE_THRESHOLD,
        ._bounce_alloc_hi       = PCI32_DMA_BOUNCE_THRESHOLD,
#else
        ._bounce_thresh         = 0,
        ._bounce_alloc_lo       = 0,
        ._bounce_alloc_hi       = 0,
#endif
        ._may_bounce            = NULL,
};
293
#ifdef _LP64
/* 64-bit capable DMA tag: no bounce buffering needed anywhere. */
struct x86_bus_dma_tag pci_bus_dma64_tag = {
        ._tag_needs_free        = 0,
        ._bounce_thresh         = 0,
        ._bounce_alloc_lo       = 0,
        ._bounce_alloc_hi       = 0,
        ._may_bounce            = NULL,
};
#endif
303
/* The single, global configuration-space lock; starts out unlocked. */
static struct pci_conf_lock cl0 = {
          .cl_cpuno = 0UL
        , .cl_sel = 0UL
};

static struct pci_conf_lock * const cl = &cl0;
310
#if NGENFB > 0 && NACPICA > 0 && defined(VGA_POST)
/* Set by ACPI suspend/resume code; control VBIOS re-POST on wakeup. */
extern int acpi_md_vbios_reset;
extern int acpi_md_vesa_modenum;
#endif

/* Callbacks handed to genfb(4) by device_pci_register(). */
static struct genfb_colormap_callback gfb_cb;
static struct genfb_pmf_callback pmf_cb;
static struct genfb_mode_callback mode_cb;
#ifdef VGA_POST
static struct vga_post *vga_posth = NULL;
#endif
322
/*
 * Acquire the global PCI configuration lock and program the selector
 * `sel', saving the previous lock/selector state in *ocl so that
 * pci_conf_unlock() can restore it.  Recursive on the same CPU so
 * interrupt handlers may nest; disables preemption while held.
 */
static void
pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel)
{
        uint32_t cpuno;

        KASSERT(sel != 0);

        kpreempt_disable();
        cpuno = cpu_number() + 1;
        /* If the kernel enters pci_conf_lock() through an interrupt
         * handler, then the CPU may already hold the lock.
         *
         * If the CPU does not already hold the lock, spin until
         * we can acquire it.
         */
        if (cpuno == cl->cl_cpuno) {
                /* Recursive entry: remember we held it ourselves. */
                ocl->cl_cpuno = cpuno;
        } else {
                u_int spins;

                ocl->cl_cpuno = 0;

                spins = SPINLOCK_BACKOFF_MIN;
                while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) {
                        SPINLOCK_BACKOFF(spins);
#ifdef LOCKDEBUG
                        if (SPINLOCK_SPINOUT(spins)) {
                                panic("%s: cpu %" PRId32
                                    " spun out waiting for cpu %" PRId32,
                                    __func__, cpuno, cl->cl_cpuno);
                        }
#endif  /* LOCKDEBUG */
                }
        }

        /* Only one CPU can be here, so an interlocked atomic_swap(3)
         * is not necessary.
         *
         * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel,
         * and applying atomic_cas_32_ni() is not an atomic operation,
         * however, any interrupt that, in the middle of the
         * operation, modifies cl->cl_sel, will also restore
         * cl->cl_sel. So cl->cl_sel will have the same value when
         * we apply atomic_cas_32_ni() as when we evaluated it,
         * before.
         */
        ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel);
        pci_conf_select(sel);
}
372
/*
 * Undo pci_conf_lock(): restore the saved selector and, unless this
 * was a recursive acquisition (ocl->cl_cpuno == cl->cl_cpuno), release
 * the lock.  Re-enables preemption.
 */
static void
pci_conf_unlock(struct pci_conf_lock *ocl)
{
        atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel);
        pci_conf_select(ocl->cl_sel);
        if (ocl->cl_cpuno != cl->cl_cpuno)
                atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno);
        kpreempt_enable();
}
382
/*
 * Compute the selector value to write for a configuration access.
 * Mode 1: the full 32-bit address (tag | register offset).
 * Mode 2: only the enable/forward bytes of the tag survive the mask;
 * the register offset is encoded in the I/O port (pci_conf_port())
 * instead.
 */
static uint32_t
pci_conf_selector(pcitag_t tag, int reg)
{
        static const pcitag_t mode2_mask = {
                .mode2 = {
                          .enable = 0xff
                        , .forward = 0xff
                }
        };

        switch (pci_mode) {
        case 1:
                return tag.mode1 | reg;
        case 2:
                return tag.mode1 & mode2_mask.mode1;
        default:
                panic("%s: mode %d not configured", __func__, pci_mode);
        }
}
402
403 static unsigned int
404 pci_conf_port(pcitag_t tag, int reg)
405 {
406 switch (pci_mode) {
407 case 1:
408 return PCI_MODE1_DATA_REG;
409 case 2:
410 return tag.mode2.port | reg;
411 default:
412 panic("%s: mode %d not configured", __func__, pci_mode);
413 }
414 }
415
/*
 * Program the configuration-space selector hardware with `sel'.
 * Mode 1: one 32-bit write to the address register.
 * Mode 2: write the enable byte, then (only when enabled) the
 * bus-forward byte; the write order matters to the hardware.
 */
static void
pci_conf_select(uint32_t sel)
{
        pcitag_t tag;

        switch (pci_mode) {
        case 1:
                outl(PCI_MODE1_ADDRESS_REG, sel);
                return;
        case 2:
                tag.mode1 = sel;
                outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable);
                if (tag.mode2.enable != 0)
                        outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward);
                return;
        default:
                panic("%s: mode %d not configured", __func__, pci_mode);
        }
}
435
436 static int
437 pci_mode_check(void)
438 {
439 pcireg_t x;
440 pcitag_t t;
441 int device;
442 const int maxdev = pci_bus_maxdevs(NULL, 0);
443
444 for (device = 0; device < maxdev; device++) {
445 t = pci_make_tag(NULL, 0, device, 0);
446 x = pci_conf_read(NULL, t, PCI_CLASS_REG);
447 if (PCI_CLASS(x) == PCI_CLASS_BRIDGE &&
448 PCI_SUBCLASS(x) == PCI_SUBCLASS_BRIDGE_HOST)
449 return 0;
450 x = pci_conf_read(NULL, t, PCI_ID_REG);
451 switch (PCI_VENDOR(x)) {
452 case PCI_VENDOR_COMPAQ:
453 case PCI_VENDOR_INTEL:
454 case PCI_VENDOR_VIATECH:
455 return 0;
456 }
457 }
458 return -1;
459 }
#ifdef __HAVE_PCI_MSI_MSIX
/*
 * Return 1 if PCI ID `id' is listed in pci_msi_quirk_tbl with quirk
 * class `type' (PCI_QUIRK_DISABLE_MSI etc.), otherwise 0.
 */
static int
pci_has_msi_quirk(pcireg_t id, int type)
{
        unsigned int n;

        for (n = 0; n < __arraycount(pci_msi_quirk_tbl); n++) {
                if (pci_msi_quirk_tbl[n].type != type)
                        continue;
                if (pci_msi_quirk_tbl[n].id == id)
                        return 1;
        }

        return 0;
}
#endif
475
/*
 * Machine-dependent hook run when a PCI bus attaches.  Wires up
 * MP-BIOS/ACPI interrupt routing and MCFG extended configuration
 * space, then decides whether MSI/MSI-X may be enabled on this bus
 * by inspecting the host bridge against the quirk table.
 */
void
pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
{
#ifdef __HAVE_PCI_MSI_MSIX
        pci_chipset_tag_t pc = pba->pba_pc;
        pcitag_t tag;
        pcireg_t id, class;
#endif

        if (pba->pba_bus == 0)
                aprint_normal(": configuration mode %d", pci_mode);
#ifdef MPBIOS
        mpbios_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0
        mpacpi_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
        acpimcfg_map_bus(self, pba->pba_pc, pba->pba_bus);
#endif

#ifdef __HAVE_PCI_MSI_MSIX
        /*
         * In order to decide whether the system supports MSI we look
         * at the host bridge, which should be device 0 function 0 on
         * bus 0.  It is safer to miss enabling MSI on a system that
         * supports it than to enable it on one that does not, so be
         * conservative here.  So we don't enable MSI if we don't find
         * a host bridge there.  We also deliberately don't enable MSI
         * on chipsets from low-end manufacturers like VIA and SiS.
         */
        tag = pci_make_tag(pc, 0, 0, 0);
        id = pci_conf_read(pc, tag, PCI_ID_REG);
        class = pci_conf_read(pc, tag, PCI_CLASS_REG);

        if (PCI_CLASS(class) != PCI_CLASS_BRIDGE ||
            PCI_SUBCLASS(class) != PCI_SUBCLASS_BRIDGE_HOST)
                return;

        /* VMware and KVM use old chipset, but they can use MSI/MSI-X */
        if ((cpu_feature[1] & CPUID2_RAZ)       /* CPUID2_RAZ: running in a VM */
            && (pci_has_msi_quirk(id, PCI_QUIRK_ENABLE_MSI_VM))) {
                pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
                pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
        } else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSI)) {
                pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
                pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
                aprint_verbose("\n");
                aprint_verbose_dev(self,
                    "This pci host supports neither MSI nor MSI-X.");
        } else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSIX)) {
                pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
                pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
                aprint_verbose("\n");
                aprint_verbose_dev(self,
                    "This pci host does not support MSI-X.");
        } else {
                pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
                pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
        }

        /*
         * Don't enable MSI on a HyperTransport bus.  In order to
         * determine that bus 0 is a HyperTransport bus, we look at
         * device 24 function 0, which is the HyperTransport
         * host/primary interface integrated on most 64-bit AMD CPUs.
         * If that device has a HyperTransport capability, bus 0 must
         * be a HyperTransport bus and we disable MSI.
         */
        if (24 < pci_bus_maxdevs(pc, 0)) {
                tag = pci_make_tag(pc, 0, 24, 0);
                if (pci_get_capability(pc, tag, PCI_CAP_LDT, NULL, NULL)) {
                        pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
                        pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
                }
        }
#endif /* __HAVE_PCI_MSI_MSIX */
}
554
555 int
556 pci_bus_maxdevs(pci_chipset_tag_t pc, int busno)
557 {
558 /*
559 * Bus number is irrelevant. If Configuration Mechanism 2 is in
560 * use, can only have devices 0-15 on any bus. If Configuration
561 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal'
562 * range).
563 */
564 if (pci_mode == 2)
565 return (16);
566 else
567 return (32);
568 }
569
/*
 * Construct an opaque tag addressing bus/device/function, honoring
 * any chipset-override make_tag hook first.  Panics on out-of-range
 * arguments or an unconfigured mode.
 */
pcitag_t
pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function)
{
        pci_chipset_tag_t ipc;
        pcitag_t tag;

        /* Walk the override chain; the first tag-maker present wins. */
        for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
                if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0)
                        continue;
                return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx,
                    pc, bus, device, function);
        }

        switch (pci_mode) {
        case 1:
                if (bus >= 256 || device >= 32 || function >= 8)
                        panic("%s: bad request(%d, %d, %d)", __func__,
                            bus, device, function);

                /* Mode 1: enable bit | bus<<16 | device<<11 | function<<8. */
                tag.mode1 = PCI_MODE1_ENABLE |
                            (bus << 16) | (device << 11) | (function << 8);
                return tag;
        case 2:
                if (bus >= 256 || device >= 16 || function >= 8)
                        panic("%s: bad request(%d, %d, %d)", __func__,
                            bus, device, function);

                /* Mode 2: device selects the I/O port window. */
                tag.mode2.port = 0xc000 | (device << 8);
                tag.mode2.enable = 0xf0 | (function << 1);
                tag.mode2.forward = bus;
                return tag;
        default:
                panic("%s: mode %d not configured", __func__, pci_mode);
        }
}
605
/*
 * Decompose a tag back into bus/device/function numbers; any of
 * bp/dp/fp may be NULL if the caller is uninterested.  Inverse of
 * pci_make_tag(), with the same override-chain dispatch.
 */
void
pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag,
    int *bp, int *dp, int *fp)
{
        pci_chipset_tag_t ipc;

        for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
                if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0)
                        continue;
                (*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx,
                    pc, tag, bp, dp, fp);
                return;
        }

        switch (pci_mode) {
        case 1:
                if (bp != NULL)
                        *bp = (tag.mode1 >> 16) & 0xff;
                if (dp != NULL)
                        *dp = (tag.mode1 >> 11) & 0x1f;
                if (fp != NULL)
                        *fp = (tag.mode1 >> 8) & 0x7;
                return;
        case 2:
                if (bp != NULL)
                        *bp = tag.mode2.forward & 0xff;
                if (dp != NULL)
                        *dp = (tag.mode2.port >> 8) & 0xf;
                if (fp != NULL)
                        *fp = (tag.mode2.enable >> 1) & 0x7;
                return;
        default:
                panic("%s: mode %d not configured", __func__, pci_mode);
        }
}
641
/*
 * Read a 32-bit configuration register.  Registers beyond the
 * standard 256-byte space are routed through ACPI MCFG when
 * available.  Out-of-range or unreadable requests return all-ones,
 * matching what the hardware returns for absent devices.
 */
pcireg_t
pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
        pci_chipset_tag_t ipc;
        pcireg_t data;
        struct pci_conf_lock ocl;
        int dev;

        KASSERT((reg & 0x3) == 0);      /* dword-aligned accesses only */

        for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
                if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0)
                        continue;
                return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg);
        }

        /* Mode 2 cannot address devices 16-31; fail like empty slots do. */
        pci_decompose_tag(pc, tag, NULL, &dev, NULL);
        if (__predict_false(pci_mode == 2 && dev >= 16))
                return (pcireg_t) -1;

        if (reg < 0)
                return (pcireg_t) -1;
        if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
                if (reg >= PCI_EXTCONF_SIZE)
                        return (pcireg_t) -1;
                acpimcfg_conf_read(pc, tag, reg, &data);
                return data;
#else
                return (pcireg_t) -1;
#endif
        }

        pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
        data = inl(pci_conf_port(tag, reg));
        pci_conf_unlock(&ocl);
        return data;
}
680
/*
 * Write a 32-bit configuration register; mirror image of
 * pci_conf_read().  Out-of-range requests are silently dropped.
 */
void
pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
        pci_chipset_tag_t ipc;
        struct pci_conf_lock ocl;
        int dev;

        KASSERT((reg & 0x3) == 0);      /* dword-aligned accesses only */

        for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
                if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
                        continue;
                (*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg,
                    data);
                return;
        }

        /* Mode 2 cannot address devices 16-31. */
        pci_decompose_tag(pc, tag, NULL, &dev, NULL);
        if (__predict_false(pci_mode == 2 && dev >= 16)) {
                return;
        }

        if (reg < 0)
                return;
        if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
                if (reg >= PCI_EXTCONF_SIZE)
                        return;
                acpimcfg_conf_write(pc, tag, reg, data);
#endif
                return;
        }

        pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
        outl(pci_conf_port(tag, reg), data);
        pci_conf_unlock(&ocl);
}
718
/*
 * Force the configuration mechanism (used by callers that already
 * know it, e.g. Xen).  Asserts against changing an already-detected,
 * different mode.
 */
void
pci_mode_set(int mode)
{
        KASSERT(pci_mode == -1 || pci_mode == mode);

        pci_mode = mode;
}
726
/*
 * Determine which PCI configuration mechanism the host bridge
 * implements.  Order of checks: cached result, mode-1 chipset quirk
 * table, known-mode-1 hypervisors (QEMU, Xen HVM), a register-level
 * mode-1 probe (with a bus-scan fallback), and finally a weak mode-2
 * probe.  Returns the detected mode (0 if none) and caches it in
 * pci_mode.
 */
int
pci_mode_detect(void)
{
        uint32_t sav, val;
        int i;
        pcireg_t idreg;
        extern char cpu_brand_string[];

        if (pci_mode != -1)
                return pci_mode;

        /*
         * We try to divine which configuration mode the host bridge wants.
         */

        sav = inl(PCI_MODE1_ADDRESS_REG);       /* saved for restore on failure */

        pci_mode = 1; /* assume this for now */
        /*
         * catch some known buggy implementations of mode 1
         */
        for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
                pcitag_t t;

                if (PCI_VENDOR(pcim1_quirk_tbl[i].id) == PCI_VENDOR_INVALID)
                        continue;
                t.mode1 = pcim1_quirk_tbl[i].tag.mode1;
                /* needs "pci_mode" already set to 1 for this read to work */
                idreg = pci_conf_read(NULL, t, PCI_ID_REG);
                if (idreg == pcim1_quirk_tbl[i].id) {
#ifdef DEBUG
                        printf("%s: known mode 1 PCI chipset (%08x)\n",
                            __func__, idreg);
#endif
                        return (pci_mode);
                }
        }

        /* Hypervisors whose emulated bridges confuse the probe below. */
        const char *reason, *system_vendor, *system_product;
        if (memcmp(cpu_brand_string, "QEMU", 4) == 0)
                /* PR 45671, https://bugs.launchpad.net/qemu/+bug/897771 */
                reason = "QEMU";
        else if ((system_vendor = pmf_get_platform("system-vendor")) != NULL &&
            strcmp(system_vendor, "Xen") == 0 &&
            (system_product = pmf_get_platform("system-product")) != NULL &&
            strcmp(system_product, "HVM domU") == 0)
                reason = "Xen";
        else
                reason = NULL;

        if (reason) {
#ifdef DEBUG
                printf("%s: forcing PCI mode 1 for %s\n", __func__, reason);
#endif
                return (pci_mode);
        }

        /*
         * Strong check for standard compliant mode 1:
         * 1. bit 31 ("enable") can be set
         * 2. byte/word access does not affect register
         */
        outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
        outb(PCI_MODE1_ADDRESS_REG + 3, 0);
        outw(PCI_MODE1_ADDRESS_REG + 2, 0);
        val = inl(PCI_MODE1_ADDRESS_REG);
        if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
#ifdef DEBUG
                printf("%s: mode 1 enable failed (%x)\n", __func__, val);
#endif
                /* Try out mode 1 to see if we can find a host bridge. */
                if (pci_mode_check() == 0) {
#ifdef DEBUG
                        printf("%s: mode 1 functional, using\n", __func__);
#endif
                        return (pci_mode);
                }
                goto not1;
        }
        /* The enable bit must also be clearable. */
        outl(PCI_MODE1_ADDRESS_REG, 0);
        val = inl(PCI_MODE1_ADDRESS_REG);
        if ((val & 0x80fffffc) != 0)
                goto not1;
        return (pci_mode);
not1:
        outl(PCI_MODE1_ADDRESS_REG, sav);

        /*
         * This mode 2 check is quite weak (and known to give false
         * positives on some Compaq machines).
         * However, this doesn't matter, because this is the
         * last test, and simply no PCI devices will be found if
         * this happens.
         */
        outb(PCI_MODE2_ENABLE_REG, 0);
        outb(PCI_MODE2_FORWARD_REG, 0);
        if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
            inb(PCI_MODE2_FORWARD_REG) != 0)
                goto not2;
        return (pci_mode = 2);
not2:

        return (pci_mode = 0);
}
830
/*
 * Invoke `func' on every PCI device on buses 0..maxbus; convenience
 * wrapper around pci_device_foreach_min() with minbus = 0.
 */
void
pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
        pci_device_foreach_min(pc, 0, maxbus, func, context);
}
837
/*
 * Invoke `func(pc, tag, context)' on every present PCI function on
 * buses minbus..maxbus (inclusive).  Functions 1-7 of a device are
 * scanned only when the header marks it multifunction or a quirk
 * entry says so.
 */
void
pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
        const struct pci_quirkdata *qd;
        int bus, device, function, maxdevs, nfuncs;
        pcireg_t id, bhlcr;
        pcitag_t tag;

        for (bus = minbus; bus <= maxbus; bus++) {
                maxdevs = pci_bus_maxdevs(pc, bus);
                for (device = 0; device < maxdevs; device++) {
                        tag = pci_make_tag(pc, bus, device, 0);
                        id = pci_conf_read(pc, tag, PCI_ID_REG);

                        /* Invalid vendor ID value? */
                        if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
                                continue;
                        /* XXX Not invalid, but we've done this ~forever. */
                        if (PCI_VENDOR(id) == 0)
                                continue;

                        qd = pci_lookup_quirkdata(PCI_VENDOR(id),
                                PCI_PRODUCT(id));

                        /* Decide how many functions to probe on this device. */
                        bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
                        if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
                             (qd != NULL &&
                             (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
                                nfuncs = 8;
                        else
                                nfuncs = 1;

                        for (function = 0; function < nfuncs; function++) {
                                tag = pci_make_tag(pc, bus, device, function);
                                id = pci_conf_read(pc, tag, PCI_ID_REG);

                                /* Invalid vendor ID value? */
                                if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
                                        continue;
                                /*
                                 * XXX Not invalid, but we've done this
                                 * ~forever.
                                 */
                                if (PCI_VENDOR(id) == 0)
                                        continue;
                                (*func)(pc, tag, context);
                        }
                }
        }
}
889
890 void
891 pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
892 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
893 {
894 struct pci_bridge_hook_arg bridge_hook;
895
896 bridge_hook.func = func;
897 bridge_hook.arg = ctx;
898
899 pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
900 &bridge_hook);
901 }
902
903 static void
904 pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
905 {
906 struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
907 pcireg_t reg;
908
909 reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
910 if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
911 (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
912 PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
913 (*bridge_hook->func)(pc, tag, bridge_hook->arg);
914 }
915 }
916
/*
 * Map a PCI_OVERRIDE_* bit to the corresponding member of `ov', or
 * NULL for an unknown bit.  Used by pci_chipset_tag_create() to
 * verify that every advertised override has an implementation.
 */
static const void *
bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
{
        switch (bit) {
        case PCI_OVERRIDE_CONF_READ:
                return ov->ov_conf_read;
        case PCI_OVERRIDE_CONF_WRITE:
                return ov->ov_conf_write;
        case PCI_OVERRIDE_INTR_MAP:
                return ov->ov_intr_map;
        case PCI_OVERRIDE_INTR_STRING:
                return ov->ov_intr_string;
        case PCI_OVERRIDE_INTR_EVCNT:
                return ov->ov_intr_evcnt;
        case PCI_OVERRIDE_INTR_ESTABLISH:
                return ov->ov_intr_establish;
        case PCI_OVERRIDE_INTR_DISESTABLISH:
                return ov->ov_intr_disestablish;
        case PCI_OVERRIDE_MAKE_TAG:
                return ov->ov_make_tag;
        case PCI_OVERRIDE_DECOMPOSE_TAG:
                return ov->ov_decompose_tag;
        default:
                return NULL;
        }
}
943
/* Release a chipset tag allocated by pci_chipset_tag_create(). */
void
pci_chipset_tag_destroy(pci_chipset_tag_t pc)
{
        kmem_free(pc, sizeof(struct pci_chipset_tag));
}
949
/*
 * Create a derived chipset tag layered over `opc' with the override
 * functions in `ov' selected by the bitmask `present'.  Every bit set
 * in `present' must have a non-NULL function pointer in `ov'.
 * Returns 0 and stores the new tag in *pcp on success, or EINVAL on a
 * malformed request (no leak: the partially built tag is freed).
 * Caller frees with pci_chipset_tag_destroy().
 */
int
pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
    const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
{
        uint64_t bit, bits, nbits;
        pci_chipset_tag_t pc;
        const void *fp;

        if (ov == NULL || present == 0)
                return EINVAL;

        pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);
        pc->pc_super = opc;

        /* Peel off the lowest set bit each iteration and validate it. */
        for (bits = present; bits != 0; bits = nbits) {
                nbits = bits & (bits - 1);
                bit = nbits ^ bits;
                if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
#ifdef DEBUG
                        printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
#endif
                        goto einval;
                }
        }

        pc->pc_ov = ov;
        pc->pc_present = present;
        pc->pc_ctx = ctx;

        *pcp = pc;

        return 0;
einval:
        kmem_free(pc, sizeof(struct pci_chipset_tag));
        return EINVAL;
}
986
/*
 * genfb colormap callback: load one VGA DAC palette entry.  The DAC
 * takes 6-bit components, hence the >> 2 on the 8-bit r/g/b values.
 */
static void
x86_genfb_set_mapreg(void *opaque, int index, int r, int g, int b)
{
        outb(IO_VGA + VGA_DAC_ADDRW, index);
        outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)r >> 2);
        outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)g >> 2);
        outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)b >> 2);
}
995
/*
 * genfb mode callback: on return to EMUL (console) mode, re-establish
 * the MTRR mapping for the framebuffer and, when VGA_POST/ACPI are
 * available, restore the saved VESA mode.  Always reports success.
 */
static bool
x86_genfb_setmode(struct genfb_softc *sc, int newmode)
{
#if NGENFB > 0
# if NACPICA > 0 && defined(VGA_POST)
        static int curmode = WSDISPLAYIO_MODE_EMUL;
# endif

        switch (newmode) {
        case WSDISPLAYIO_MODE_EMUL:
                x86_genfb_mtrr_init(sc->sc_fboffset,
                    sc->sc_height * sc->sc_stride);
# if NACPICA > 0 && defined(VGA_POST)
                if (curmode != newmode) {
                        if (vga_posth != NULL && acpi_md_vesa_modenum != 0) {
                                vga_post_set_vbe(vga_posth,
                                    acpi_md_vesa_modenum);
                        }
                }
# endif
                break;
        }

# if NACPICA > 0 && defined(VGA_POST)
        curmode = newmode;
# endif
#endif
        return true;
}
1025
/* genfb PMF suspend callback: nothing to save; always succeeds. */
static bool
x86_genfb_suspend(device_t dev, const pmf_qual_t *qual)
{
        return true;
}
1031
/*
 * genfb PMF resume callback: optionally re-POST the VBIOS (when
 * acpi_md_vbios_reset == 2) and restore the VESA mode, then reload
 * the saved palette.  Always reports success.
 */
static bool
x86_genfb_resume(device_t dev, const pmf_qual_t *qual)
{
#if NGENFB > 0
        struct pci_genfb_softc *psc = device_private(dev);

#if NACPICA > 0 && defined(VGA_POST)
        if (vga_posth != NULL && acpi_md_vbios_reset == 2) {
                vga_post_call(vga_posth);
                if (acpi_md_vesa_modenum != 0)
                        vga_post_set_vbe(vga_posth, acpi_md_vesa_modenum);
        }
#endif
        genfb_restore_palette(&psc->sc_gen);
#endif

        return true;
}
1050
/*
 * device_pci_register --
 *	Machine-dependent autoconfiguration hook, called for every
 *	device as it attaches.  Two jobs:
 *
 *	1. For network interfaces, match the attaching device against
 *	   the boot loader's BTINFO_NETIF record so the kernel can pick
 *	   the boot NIC as root device.
 *	2. For the first PCI display device seen, treat it as the
 *	   console: publish the boot framebuffer's geometry/address and
 *	   the genfb callback pointers as device properties.
 *
 *	Returns the device to be used as boot device, or NULL.
 */
device_t
device_pci_register(device_t dev, void *aux)
{
	/* Only the first PCI display device becomes the console. */
	static bool found_console = false;
	device_t parent = device_parent(dev);

	device_pci_props_register(dev, aux);

	/*
	 * Handle network interfaces here, the attachment information is
	 * not available driver-independently later.
	 *
	 * For disks, there is nothing useful available at attach time.
	 */
	if (device_class(dev) == DV_IFNET) {
		struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF);
		if (bin == NULL)
			return NULL;

		/*
		 * We don't check the driver name against the device name
		 * passed by the boot ROM. The ROM should stay usable if
		 * the driver becomes obsolete. The physical attachment
		 * information (checked below) must be sufficient to
		 * identify the device.
		 */
		if (bin->bus == BI_BUS_PCI && device_is_a(parent, "pci")) {
			struct pci_attach_args *paa = aux;
			int b, d, f;

			/*
			 * Calculate BIOS representation of:
			 *
			 *	<bus,device,function>
			 *
			 * and compare.
			 */
			pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f);
			if (bin->addr.tag == ((b << 8) | (d << 3) | f))
				return dev;

#ifndef XEN
			/*
			 * efiboot reports parent ppb bus/device/function.
			 * Retry the match one level up in that case.
			 */
			device_t grand = device_parent(parent);
			if (efi_probe() && grand && device_is_a(grand, "ppb")) {
				struct ppb_softc *ppb_sc = device_private(grand);
				pci_decompose_tag(ppb_sc->sc_pc, ppb_sc->sc_tag,
				    &b, &d, &f);
				if (bin->addr.tag == ((b << 8) | (d << 3) | f))
					return dev;
			}
#endif
		}
	}
	if (parent && device_is_a(parent, "pci") &&
	    found_console == false) {
		struct btinfo_framebuffer *fbinfo;
		struct pci_attach_args *pa = aux;
		prop_dictionary_t dict;

		if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY) {
#if NWSDISPLAY > 0 && NGENFB > 0
			extern struct vcons_screen x86_genfb_console_screen;
			struct rasops_info *ri;

			ri = &x86_genfb_console_screen.scr_ri;
#endif

			fbinfo = lookup_bootinfo(BTINFO_FRAMEBUFFER);
			dict = device_properties(dev);
			/*
			 * framebuffer drivers other than genfb can work
			 * without the address property
			 */
			if (fbinfo != NULL) {
				/*
				 * Publish the boot framebuffer geometry and
				 * physical address for the display driver.
				 */
				if (fbinfo->physaddr != 0) {
					prop_dictionary_set_uint32(dict, "width",
					    fbinfo->width);
					prop_dictionary_set_uint32(dict, "height",
					    fbinfo->height);
					prop_dictionary_set_uint8(dict, "depth",
					    fbinfo->depth);
					prop_dictionary_set_uint16(dict, "linebytes",
					    fbinfo->stride);

					prop_dictionary_set_uint64(dict, "address",
					    fbinfo->physaddr);
#if NWSDISPLAY > 0 && NGENFB > 0
					/*
					 * The console is already mapped; pass
					 * the kernel VA so the driver need not
					 * remap it.
					 */
					if (ri->ri_bits != NULL) {
						prop_dictionary_set_uint64(dict,
						    "virtual_address",
						    ri->ri_hwbits != NULL ?
						    (vaddr_t)ri->ri_hworigbits :
						    (vaddr_t)ri->ri_origbits);
					}
#endif
				}
#if notyet
				prop_dictionary_set_bool(dict, "splash",
				    fbinfo->flags & BI_FB_SPLASH ?
				    true : false);
#endif
				/*
				 * 8-bit indexed modes need the DAC palette
				 * callback (x86_genfb_set_mapreg).
				 */
				if (fbinfo->depth == 8) {
					gfb_cb.gcc_cookie = NULL;
					gfb_cb.gcc_set_mapreg =
					    x86_genfb_set_mapreg;
					prop_dictionary_set_uint64(dict,
					    "cmap_callback",
					    (uint64_t)(uintptr_t)&gfb_cb);
				}
				/* Mode-switch hook, see x86_genfb_setmode(). */
				if (fbinfo->physaddr != 0) {
					mode_cb.gmc_setmode = x86_genfb_setmode;
					prop_dictionary_set_uint64(dict,
					    "mode_callback",
					    (uint64_t)(uintptr_t)&mode_cb);
				}

#if NWSDISPLAY > 0 && NGENFB > 0
				if (device_is_a(dev, "genfb")) {
					prop_dictionary_set_bool(dict,
					    "enable_shadowfb",
					    ri->ri_hwbits != NULL ?
					    true : false);

					x86_genfb_set_console_dev(dev);
#ifdef DDB
					db_trap_callback =
					    x86_genfb_ddb_trap_callback;
#endif
				}
#endif
			}
#if 1 && NWSDISPLAY > 0 && NGENFB > 0
			/* XXX */
			if (device_is_a(dev, "genfb")) {
				prop_dictionary_set_bool(dict, "is_console",
				    genfb_is_console());
			} else
#endif
				prop_dictionary_set_bool(dict, "is_console", true);

			/* Keep the boot messages on screen. */
			prop_dictionary_set_bool(dict, "clear-screen", false);
#if NWSDISPLAY > 0 && NGENFB > 0
			prop_dictionary_set_uint16(dict, "cursor-row",
			    x86_genfb_console_screen.scr_ri.ri_crow);
#endif
#if notyet
			prop_dictionary_set_bool(dict, "splash",
			    fbinfo->flags & BI_FB_SPLASH ? true : false);
#endif
			/* Suspend/resume hooks for the console display. */
			pmf_cb.gpc_suspend = x86_genfb_suspend;
			pmf_cb.gpc_resume = x86_genfb_resume;
			prop_dictionary_set_uint64(dict,
			    "pmf_callback", (uint64_t)(uintptr_t)&pmf_cb);
#ifdef VGA_POST
			vga_posth = vga_post_init(pa->pa_bus, pa->pa_device,
			    pa->pa_function);
#endif
			found_console = true;
			return NULL;
		}
	}
	return NULL;
}
1217
/*
 * PCI bus number probed for a puc(4) console board; defaults to bus 0
 * unless overridden by the kernel configuration.
 */
#ifndef PUC_CNBUS
#define PUC_CNBUS 0
#endif
1221
#if NCOM > 0
/*
 * cpu_puc_cnprobe --
 *	Early console probe for a com port on a puc(4) board.  Runs
 *	before normal PCI autoconfiguration, so synthesize just enough
 *	of a pci_attach_args for the caller: the x86 I/O and memory bus
 *	space tags, a null chipset tag, and a PCI tag addressing the
 *	highest device number on bus PUC_CNBUS, function 0.  Always
 *	returns 0.
 */
int
cpu_puc_cnprobe(struct consdev *cn, struct pci_attach_args *pa)
{
	/* Determine the PCI configuration mechanism before making tags. */
	pci_mode_detect();
	pa->pa_iot = x86_bus_space_io;
	pa->pa_memt = x86_bus_space_mem;
	pa->pa_pc = 0;
	pa->pa_tag = pci_make_tag(0, PUC_CNBUS, pci_bus_maxdevs(NULL, 0) - 1,
	    0);

	return 0;
}
#endif
1236