1 /* $NetBSD: pci_machdep.c,v 1.100 2025/05/08 13:57:26 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved. 
36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 3. All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed by Charles M. Hannum. 48 * 4. The name of the author may not be used to endorse or promote products 49 * derived from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 61 */ 62 63 /* 64 * Machine-specific functions for PCI autoconfiguration. 65 * 66 * On PCs, there are two methods of generating PCI configuration cycles. 67 * We try to detect the appropriate mechanism for this machine and set 68 * up a few function pointers to access the correct method directly. 
69 * 70 * The configuration method can be hard-coded in the config file by 71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode 72 * as defined in section 3.6.4.1, `Generating Configuration Cycles'. 73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.100 2025/05/08 13:57:26 riastradh Exp $"); 77 78 #include <sys/types.h> 79 #include <sys/param.h> 80 #include <sys/time.h> 81 #include <sys/systm.h> 82 #include <sys/errno.h> 83 #include <sys/device.h> 84 #include <sys/bus.h> 85 #include <sys/cpu.h> 86 #include <sys/kmem.h> 87 88 #include <uvm/uvm_extern.h> 89 90 #include <machine/bus_private.h> 91 92 #include <machine/pio.h> 93 #include <machine/lock.h> 94 95 #include <dev/isa/isareg.h> 96 #include <dev/isa/isavar.h> 97 #include <dev/pci/pcivar.h> 98 #include <dev/pci/pcireg.h> 99 #include <dev/pci/pccbbreg.h> 100 #include <dev/pci/pcidevs.h> 101 #include <dev/pci/ppbvar.h> 102 #include <dev/pci/genfb_pcivar.h> 103 104 #include <dev/wsfb/genfbvar.h> 105 #include <arch/x86/include/genfb_machdep.h> 106 #include <arch/xen/include/hypervisor.h> 107 #include <arch/xen/include/xen.h> 108 #include <dev/ic/vgareg.h> 109 110 #include "acpica.h" 111 #include "genfb.h" 112 #include "isa.h" 113 #include "opt_acpi.h" 114 #include "opt_ddb.h" 115 #include "opt_mpbios.h" 116 #include "opt_puc.h" 117 #include "opt_vga.h" 118 #include "pci.h" 119 #include "wsdisplay.h" 120 #include "com.h" 121 #include "opt_xen.h" 122 123 #ifdef DDB 124 #include <machine/db_machdep.h> 125 #include <ddb/db_sym.h> 126 #include <ddb/db_extern.h> 127 #endif 128 129 #ifdef VGA_POST 130 #include <x86/vga_post.h> 131 #endif 132 133 #include <x86/cpuvar.h> 134 135 #include <machine/autoconf.h> 136 #include <machine/bootinfo.h> 137 138 #ifdef MPBIOS 139 #include <machine/mpbiosvar.h> 140 #endif 141 142 #if NACPICA > 0 143 #include <machine/mpacpi.h> 144 #if !defined(NO_PCI_EXTENDED_CONFIG) 145 #include <dev/acpi/acpivar.h> 146 #include <dev/acpi/acpi_mcfg.h> 
#endif
#endif

#include <machine/mpconfig.h>

#if NCOM > 0
#include <dev/pci/puccn.h>
#endif

#ifndef XENPV
#include <x86/efi.h>
#endif

#include "opt_pci_conf_mode.h"

/*
 * The configuration mechanism may be forced at build time with
 * `options PCI_CONF_MODE=N'; otherwise -1 means "not yet detected"
 * and pci_mode_detect() will probe for it.
 */
#ifdef PCI_CONF_MODE
#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2)
static int pci_mode = PCI_CONF_MODE;
#else
#error Invalid PCI configuration mode.
#endif
#else
static int pci_mode = -1;
#endif

/*
 * Saved state for the global configuration-space lock: which CPU
 * holds it and which address was selected before we took it, so
 * both can be restored on unlock (the lock nests on one CPU when
 * taken again from an interrupt handler).
 */
struct pci_conf_lock {
	uint32_t cl_cpuno;	/* 0: unlocked
				 * 1 + n: locked by CPU n (0 <= n)
				 */
	uint32_t cl_sel;	/* the address that's being read. */
};

static void pci_conf_unlock(struct pci_conf_lock *);
static uint32_t pci_conf_selector(pcitag_t, int);
static unsigned int pci_conf_port(pcitag_t, int);
static void pci_conf_select(uint32_t);
static void pci_conf_lock(struct pci_conf_lock *, uint32_t);
static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *);
/* Closure passed through pci_device_foreach_min() by pci_bridge_foreach(). */
struct pci_bridge_hook_arg {
	void (*func)(pci_chipset_tag_t, pcitag_t, void *);
	void *arg;
};

/* Configuration Mechanism 1: address/data register pair at 0xcf8/0xcfc. */
#define	PCI_MODE1_ENABLE	0x80000000UL
#define	PCI_MODE1_ADDRESS_REG	0x0cf8
#define	PCI_MODE1_DATA_REG	0x0cfc

/* Configuration Mechanism 2 (deprecated): enable/forward registers. */
#define	PCI_MODE2_ENABLE_REG	0x0cf8
#define	PCI_MODE2_FORWARD_REG	0x0cfa

/*
 * Table of chipsets known to require mode 1, checked by
 * pci_mode_detect() before the heuristic probes.  The first entry is
 * deliberately invalid and may be patched at run time.
 */
#define _tag(b, d, f) \
	{.mode1 = PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)}
#define _qe(bus, dev, fcn, vend, prod) \
	{_tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)}
const struct {
	pcitag_t tag;
	pcireg_t id;
} pcim1_quirk_tbl[] = {
	_qe(0, 0, 0, PCI_VENDOR_INVALID, 0x0000),	/* patchable */
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1),
	/* XXX Triflex2 not tested */
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2),
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4),
#if 0
	/* Triton needed for Connectix Virtual PC */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
	/* Connectix Virtual PC 5 has a 440BX */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
	/* Parallels Desktop for Mac */
	_qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO),
	_qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS),
	/* SIS 740 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740),
	/* SIS 741 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741),
	/* VIA Technologies VX900 */
	_qe(0, 0, 0, PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VX900_HB)
#endif
};
#undef _tag
#undef _qe

/* arch/xen does not support MSI/MSI-X yet. */
#ifdef __HAVE_PCI_MSI_MSIX
#define PCI_QUIRK_DISABLE_MSI	1 /* Neither MSI nor MSI-X work */
#define PCI_QUIRK_DISABLE_MSIX	2 /* MSI-X does not work */
#define PCI_QUIRK_ENABLE_MSI_VM	3 /* Older chipset in VM where MSI and MSI-X works */

/*
 * Host bridges (matched by PCI ID in pci_attach_hook()) with known
 * MSI/MSI-X behavior.  _dme: disable both; _dmxe: disable MSI-X only;
 * _emve: chipset is old but hypervisors emulating it support MSI.
 */
#define _dme(vend, prod) \
	{ PCI_QUIRK_DISABLE_MSI, PCI_ID_CODE(vend, prod) }
#define _dmxe(vend, prod) \
	{ PCI_QUIRK_DISABLE_MSIX, PCI_ID_CODE(vend, prod) }
#define _emve(vend, prod) \
	{ PCI_QUIRK_ENABLE_MSI_VM, PCI_ID_CODE(vend, prod) }
const struct {
	int type;
	pcireg_t id;
} pci_msi_quirk_tbl[] = {
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCMC),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437MX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437VX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439HX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439TX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82440MX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_NOAGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82820_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB),
	_dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_PCHB),
	_dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_PCHB),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC751_SC),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC761_SC),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC762_NB),

	_emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),	/* QEMU */
	_emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),	/* VMWare */
};
#undef _dme
#undef _dmxe
#undef _emve
#endif /* __HAVE_PCI_MSI_MSIX */

/*
 * PCI doesn't have any special needs; just use the generic versions
 * of these functions.
 */
struct x86_bus_dma_tag pci_bus_dma_tag = {
	._tag_needs_free = 0,
#if defined(_LP64) || defined(PAE)
	/* Bounce-buffer DMA below 4 GB for 32-bit PCI devices. */
	._bounce_thresh = PCI32_DMA_BOUNCE_THRESHOLD,
	._bounce_alloc_lo = ISA_DMA_BOUNCE_THRESHOLD,
	._bounce_alloc_hi = PCI32_DMA_BOUNCE_THRESHOLD,
#else
	._bounce_thresh = 0,
	._bounce_alloc_lo = 0,
	._bounce_alloc_hi = 0,
#endif
	._may_bounce = NULL,
};

#ifdef _LP64
/* 64-bit-capable DMA tag: no bouncing needed. */
struct x86_bus_dma_tag pci_bus_dma64_tag = {
	._tag_needs_free = 0,
	._bounce_thresh = 0,
	._bounce_alloc_lo = 0,
	._bounce_alloc_hi = 0,
	._may_bounce = NULL,
};
#endif

/* The single, global configuration-space lock; starts unlocked. */
static struct pci_conf_lock cl0 = {
	  .cl_cpuno = 0UL
	, .cl_sel = 0UL
};

static struct pci_conf_lock * const cl = &cl0;

/* Callbacks handed to genfb(4) via the device property dictionary. */
static struct genfb_colormap_callback gfb_cb;
static struct genfb_pmf_callback pmf_cb;
static struct genfb_mode_callback mode_cb;
#ifdef VGA_POST
static struct vga_post *vga_posth = NULL;
#endif

/*
 * Acquire the global configuration-space lock, saving the previous
 * lock state in *ocl for pci_conf_unlock(), and select `sel' on the
 * configuration address port.  Recursive acquisition by the same CPU
 * (from an interrupt handler) is permitted.  Disables preemption for
 * the duration.
 */
static void
pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel)
{
	uint32_t cpuno;

	KASSERT(sel != 0);

	kpreempt_disable();
	cpuno = cpu_number() + 1;
	/* If the kernel enters pci_conf_lock() through an interrupt
	 * handler, then the CPU may already hold the lock.
	 *
	 * If the CPU does not already hold the lock, spin until
	 * we can acquire it.
	 */
	if (cpuno == cl->cl_cpuno) {
		/* Recursive entry: record ownership so unlock restores it. */
		ocl->cl_cpuno = cpuno;
	} else {
#ifdef LOCKDEBUG
		u_int spins = 0;
#endif
		u_int count;
		count = SPINLOCK_BACKOFF_MIN;

		ocl->cl_cpuno = 0;

		while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins)) {
				panic("%s: cpu %" PRId32
				    " spun out waiting for cpu %" PRId32,
				    __func__, cpuno, cl->cl_cpuno);
			}
#endif
		}
	}

	/* Only one CPU can be here, so an interlocked atomic_swap(3)
	 * is not necessary.
	 *
	 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel,
	 * and applying atomic_cas_32_ni() is not an atomic operation,
	 * however, any interrupt that, in the middle of the
	 * operation, modifies cl->cl_sel, will also restore
	 * cl->cl_sel.  So cl->cl_sel will have the same value when
	 * we apply atomic_cas_32_ni() as when we evaluated it,
	 * before.
	 */
	ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel);
	pci_conf_select(sel);
}

/*
 * Release the configuration-space lock, restoring the previously
 * selected address and (for a non-recursive hold) the previous owner,
 * then re-enable preemption.
 */
static void
pci_conf_unlock(struct pci_conf_lock *ocl)
{
	atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel);
	pci_conf_select(ocl->cl_sel);
	if (ocl->cl_cpuno != cl->cl_cpuno)
		atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno);
	kpreempt_enable();
}

/*
 * Compute the value to write to the configuration address port for
 * accessing register `reg' of the device named by `tag'.  For mode 2
 * only the enable/forward bytes of the tag are relevant, so the rest
 * is masked off.
 */
static uint32_t
pci_conf_selector(pcitag_t tag, int reg)
{
	static const pcitag_t mode2_mask = {
		.mode2 = {
			.enable = 0xff
		      , .forward = 0xff
		}
	};

	switch (pci_mode) {
	case 1:
		return tag.mode1 | reg;
	case 2:
		return tag.mode1 & mode2_mask.mode1;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

/*
 * Return the I/O port through which the selected configuration
 * register is actually read or written.
 */
static unsigned int
pci_conf_port(pcitag_t tag, int reg)
{
	switch (pci_mode) {
	case 1:
		return PCI_MODE1_DATA_REG;
	case 2:
		return tag.mode2.port | reg;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

/*
 * Program the configuration address (mode 1) or enable/forward
 * registers (mode 2) with a selector previously built by
 * pci_conf_selector().
 */
static void
pci_conf_select(uint32_t sel)
{
	pcitag_t tag;

	switch (pci_mode) {
	case 1:
		outl(PCI_MODE1_ADDRESS_REG, sel);
		return;
	case 2:
		tag.mode1 = sel;
		outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable);
		/* Forward register only matters while access is enabled. */
		if (tag.mode2.enable != 0)
			outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward);
		return;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

/*
 * Sanity-check the currently assumed configuration mode by scanning
 * bus 0 for a host bridge or a device from a known chipset vendor.
 * Returns 0 if such a device was found (mode looks functional),
 * -1 otherwise.
 */
static int
pci_mode_check(void)
{
	pcireg_t x;
	pcitag_t t;
	int device;
	const int maxdev = pci_bus_maxdevs(NULL, 0);

	for (device = 0; device < maxdev; device++) {
		t = pci_make_tag(NULL, 0, device, 0);
		x = pci_conf_read(NULL, t, PCI_CLASS_REG);
		if (PCI_CLASS(x) == PCI_CLASS_BRIDGE &&
		    PCI_SUBCLASS(x) == PCI_SUBCLASS_BRIDGE_HOST)
			return 0;
		x = pci_conf_read(NULL, t, PCI_ID_REG);
		switch (PCI_VENDOR(x)) {
		case PCI_VENDOR_COMPAQ:
		case PCI_VENDOR_INTEL:
		case PCI_VENDOR_VIATECH:
			return 0;
		}
	}
	return -1;
}
#ifdef __HAVE_PCI_MSI_MSIX
/*
 * Return nonzero if the given PCI ID appears in pci_msi_quirk_tbl
 * with quirk `type'.
 */
static int
pci_has_msi_quirk(pcireg_t id, int type)
{
	int i;

	for (i = 0; i < __arraycount(pci_msi_quirk_tbl); i++) {
		if (id == pci_msi_quirk_tbl[i].id &&
		    type == pci_msi_quirk_tbl[i].type)
			return 1;
	}

	return 0;
}
#endif

/*
 * Bus attachment hook: report the configuration mode, let the
 * interrupt-routing code (MPBIOS/ACPI) see the bus, map extended
 * (MCFG) configuration space, and decide whether MSI/MSI-X may be
 * enabled on this bus based on the host bridge identity and quirks.
 */
void
pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
{
#ifdef __HAVE_PCI_MSI_MSIX
	pci_chipset_tag_t pc = pba->pba_pc;
	pcitag_t tag;
	pcireg_t id, class;
	int device, function;
	bool havehb = false;
#endif

	if (pba->pba_bus == 0)
		aprint_normal(": configuration mode %d", pci_mode);
#ifdef MPBIOS
	mpbios_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0
	mpacpi_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
	acpimcfg_map_bus(self, pba->pba_pc, pba->pba_bus);
#endif

#ifdef __HAVE_PCI_MSI_MSIX
	/*
	 * In order to decide whether the system supports MSI we look
	 * at the host bridge, which should be on bus 0.
	 * It is better to not enable MSI on systems that
	 * support it than the other way around, so be conservative
	 * here.  So we don't enable MSI if we don't find a host
	 * bridge there.  We also deliberately don't enable MSI on
	 * chipsets from low-end manufacturers like VIA and SiS.
	 */
	for (device = 0; device < pci_bus_maxdevs(pc, 0); device++) {
		for (function = 0; function <= 7; function++) {
			tag = pci_make_tag(pc, 0, device, function);
			id = pci_conf_read(pc, tag, PCI_ID_REG);
			class = pci_conf_read(pc, tag, PCI_CLASS_REG);

			if (PCI_CLASS(class) == PCI_CLASS_BRIDGE &&
			    PCI_SUBCLASS(class) ==
			    PCI_SUBCLASS_BRIDGE_HOST) {
				havehb = true;
				goto donehb;
			}
		}
	}
donehb:

	if (havehb == false)
		return;

	/* `id' below is the host bridge's ID found by the loop above. */
	/* VMware and KVM use old chipset, but they can use MSI/MSI-X */
	if ((cpu_feature[1] & CPUID2_RAZ)
	    && (pci_has_msi_quirk(id, PCI_QUIRK_ENABLE_MSI_VM))) {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
	} else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSI)) {
		pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "This pci host supports neither MSI nor MSI-X.");
	} else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSIX)) {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "This pci host does not support MSI-X.");
#if NACPICA > 0
	} else if (acpi_active &&
	    AcpiGbl_FADT.Header.Revision >= 4 &&
	    (AcpiGbl_FADT.BootFlags & ACPI_FADT_NO_MSI) != 0) {
		pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "MSI support disabled via ACPI IAPC_BOOT_ARCH flag.\n");
#endif
	} else {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
	}

	/*
	 * Don't enable MSI on a HyperTransport bus.  In order to
	 * determine that bus 0 is a HyperTransport bus, we look at
	 * device 24 function 0, which is the HyperTransport
	 * host/primary interface integrated on most 64-bit AMD CPUs.
	 * If that device has a HyperTransport capability, bus 0 must
	 * be a HyperTransport bus and we disable MSI.
	 */
	if (24 < pci_bus_maxdevs(pc, 0)) {
		tag = pci_make_tag(pc, 0, 24, 0);
		if (pci_get_capability(pc, tag, PCI_CAP_LDT, NULL, NULL)) {
			pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
			pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		}
	}

#endif /* __HAVE_PCI_MSI_MSIX */
}

/*
 * Return the number of device slots on the given bus.
 */
int
pci_bus_maxdevs(pci_chipset_tag_t pc, int busno)
{
	/*
	 * Bus number is irrelevant.  If Configuration Mechanism 2 is in
	 * use, can only have devices 0-15 on any bus.  If Configuration
	 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal'
	 * range).
	 */
	if (pci_mode == 2)
		return (16);
	else
		return (32);
}

/*
 * Build a configuration tag for bus/device/function, honoring any
 * chipset-tag override chain first.
 */
pcitag_t
pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function)
{
	pci_chipset_tag_t ipc;
	pcitag_t tag;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0)
			continue;
		return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx,
		    pc, bus, device, function);
	}

	switch (pci_mode) {
	case 1:
		if (bus >= 256 || device >= 32 || function >= 8)
			panic("%s: bad request(%d, %d, %d)", __func__,
			    bus, device, function);

		tag.mode1 = PCI_MODE1_ENABLE |
		    (bus << 16) | (device << 11) | (function << 8);
		return tag;
	case 2:
		if (bus >= 256 || device >= 16 || function >= 8)
			panic("%s: bad request(%d, %d, %d)", __func__,
			    bus, device, function);

		tag.mode2.port = 0xc000 | (device << 8);
		tag.mode2.enable = 0xf0 | (function << 1);
		tag.mode2.forward = bus;
		return tag;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

/*
 * Split a configuration tag back into bus/device/function.  Any of
 * bp/dp/fp may be NULL if the caller is not interested in that part.
 */
void
pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag,
    int *bp, int *dp, int *fp)
{
	pci_chipset_tag_t ipc;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0)
			continue;
		(*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx,
		    pc, tag, bp, dp, fp);
		return;
	}

	switch (pci_mode) {
	case 1:
		if (bp != NULL)
			*bp = (tag.mode1 >> 16) & 0xff;
		if (dp != NULL)
			*dp = (tag.mode1 >> 11) & 0x1f;
		if (fp != NULL)
			*fp = (tag.mode1 >> 8) & 0x7;
		return;
	case 2:
		if (bp != NULL)
			*bp = tag.mode2.forward & 0xff;
		if (dp != NULL)
			*dp = (tag.mode2.port >> 8) & 0xf;
		if (fp != NULL)
			*fp = (tag.mode2.enable >> 1) & 0x7;
		return;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

/*
 * Read a 32-bit configuration register.  Registers beyond standard
 * configuration space are serviced through ACPI MCFG when available;
 * out-of-range requests return all-ones like a master abort.
 */
pcireg_t
pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	pci_chipset_tag_t ipc;
	pcireg_t data;
	struct pci_conf_lock ocl;
	int dev;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0)
			continue;
		return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg);
	}

	/* Mode 2 cannot address devices past 15; fail like a master abort. */
	pci_decompose_tag(pc, tag, NULL, &dev, NULL);
	if (__predict_false(pci_mode == 2 && dev >= 16))
		return (pcireg_t) -1;

	if (reg < 0)
		return (pcireg_t) -1;
	if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
		if (reg >= PCI_EXTCONF_SIZE)
			return (pcireg_t) -1;
		acpimcfg_conf_read(pc, tag, reg, &data);
		return data;
#else
		return (pcireg_t) -1;
#endif
	}

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	data = inl(pci_conf_port(tag, reg));
	pci_conf_unlock(&ocl);
	return data;
}

/*
 * Write a 32-bit configuration register; mirror image of
 * pci_conf_read() including the extended-space and mode-2 handling.
 */
void
pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;
	int dev;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		(*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg,
		    data);
		return;
	}

	pci_decompose_tag(pc, tag, NULL, &dev, NULL);
	if (__predict_false(pci_mode == 2 && dev >= 16)) {
		return;
	}

	if (reg < 0)
		return;
	if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
		if (reg >= PCI_EXTCONF_SIZE)
			return;
		acpimcfg_conf_write(pc, tag, reg, data);
#endif
		return;
	}

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	outl(pci_conf_port(tag, reg), data);
	pci_conf_unlock(&ocl);
}

#ifdef XENPV
/*
 * 16-bit configuration write used by Xen PV.  Cases that the plain
 * port-I/O path cannot express (overrides, extended space) panic
 * rather than silently misbehave.
 */
void
pci_conf_write16(pci_chipset_tag_t pc, pcitag_t tag, int reg, uint16_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;
	int dev;

	KASSERT((reg & 0x1) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		panic("pci_conf_write16 and override");
	}

	pci_decompose_tag(pc, tag, NULL, &dev, NULL);
	if (__predict_false(pci_mode == 2 && dev >= 16)) {
		return;
	}

	if (reg < 0)
		return;
	if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
		if (reg >= PCI_EXTCONF_SIZE)
			return;
		panic("pci_conf_write16 and reg >= PCI_CONF_SIZE");
#endif
		return;
	}

	/* Select the containing dword, then write the 16-bit sub-port. */
	pci_conf_lock(&ocl, pci_conf_selector(tag, reg & ~0x3));
	outl(pci_conf_port(tag, reg & ~0x3) + (reg & 0x3), data);
	pci_conf_unlock(&ocl);
}
#endif /* XENPV */

/*
 * Force the configuration mode; only legal before detection, or to
 * re-assert the already detected/forced mode.
 */
void
pci_mode_set(int mode)
{
	KASSERT(pci_mode == -1 || pci_mode == mode);

	pci_mode = mode;
}

/*
 * Detect which configuration mechanism the host bridge implements.
 * Returns (and caches in pci_mode) 1, 2, or 0 if neither works.
 */
int
pci_mode_detect(void)
{
	uint32_t sav, val;
	int i;
	pcireg_t idreg;

	if (pci_mode != -1)
		return pci_mode;

	/*
	 * We try to divine which configuration mode the host bridge wants.
	 */

	sav = inl(PCI_MODE1_ADDRESS_REG);

	pci_mode = 1; /* assume this for now */
	/*
	 * catch some known buggy implementations of mode 1
	 */
	for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
		pcitag_t t;

		if (PCI_VENDOR(pcim1_quirk_tbl[i].id) == PCI_VENDOR_INVALID)
			continue;
		t.mode1 = pcim1_quirk_tbl[i].tag.mode1;
		idreg = pci_conf_read(NULL, t, PCI_ID_REG); /* needs "pci_mode" */
		if (idreg == pcim1_quirk_tbl[i].id) {
#ifdef DEBUG
			printf("%s: known mode 1 PCI chipset (%08x)\n",
			    __func__, idreg);
#endif
			return (pci_mode);
		}
	}

#if 0
	extern char cpu_brand_string[];
	const char *reason, *system_vendor, *system_product;
	if (memcmp(cpu_brand_string, "QEMU", 4) == 0)
		/* PR 45671, https://bugs.launchpad.net/qemu/+bug/897771 */
		reason = "QEMU";
	else if ((system_vendor = pmf_get_platform("system-vendor")) != NULL &&
	    strcmp(system_vendor, "Xen") == 0 &&
	    (system_product = pmf_get_platform("system-product")) != NULL &&
	    strcmp(system_product, "HVM domU") == 0)
		reason = "Xen";
	else
		reason = NULL;

	if (reason) {
#ifdef DEBUG
		printf("%s: forcing PCI mode 1 for %s\n", __func__, reason);
#endif
		return (pci_mode);
	}
#endif
	/*
	 * Strong check for standard compliant mode 1:
	 * 1. bit 31 ("enable") can be set
	 * 2. byte/word access does not affect register
	 */
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
	outb(PCI_MODE1_ADDRESS_REG + 3, 0);
	outw(PCI_MODE1_ADDRESS_REG + 2, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
#ifdef DEBUG
		printf("%s: mode 1 enable failed (%x)\n", __func__, val);
#endif
		/* Try out mode 1 to see if we can find a host bridge. */
		if (pci_mode_check() == 0) {
#ifdef DEBUG
			printf("%s: mode 1 functional, using\n", __func__);
#endif
			return (pci_mode);
		}
		goto not1;
	}
	outl(PCI_MODE1_ADDRESS_REG, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != 0)
		goto not1;
	return (pci_mode);
not1:
	outl(PCI_MODE1_ADDRESS_REG, sav);

	/*
	 * This mode 2 check is quite weak (and known to give false
	 * positives on some Compaq machines).
	 * However, this doesn't matter, because this is the
	 * last test, and simply no PCI devices will be found if
	 * this happens.
	 */
	outb(PCI_MODE2_ENABLE_REG, 0);
	outb(PCI_MODE2_FORWARD_REG, 0);
	if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
	    inb(PCI_MODE2_FORWARD_REG) != 0)
		goto not2;
	return (pci_mode = 2);
not2:

	return (pci_mode = 0);
}

/*
 * Invoke `func' for every function of every device on buses 0
 * through maxbus.
 */
void
pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	pci_device_foreach_min(pc, 0, maxbus, func, context);
}

/*
 * As pci_device_foreach(), but starting at minbus.  Functions 1-7 of
 * a device are only scanned if the header declares it multi-function
 * or a quirk entry forces it.
 */
void
pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	const struct pci_quirkdata *qd;
	int bus, device, function, maxdevs, nfuncs;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (bus = minbus; bus <= maxbus; bus++) {
		maxdevs = pci_bus_maxdevs(pc, bus);
		for (device = 0; device < maxdevs; device++) {
			tag = pci_make_tag(pc, bus, device, 0);
			id = pci_conf_read(pc, tag, PCI_ID_REG);

			/* Invalid vendor ID value? */
			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
				continue;
			/* XXX Not invalid, but we've done this ~forever. */
			if (PCI_VENDOR(id) == 0)
				continue;

			qd = pci_lookup_quirkdata(PCI_VENDOR(id),
			    PCI_PRODUCT(id));

			bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
			    (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
				nfuncs = 8;
			else
				nfuncs = 1;

			for (function = 0; function < nfuncs; function++) {
				tag = pci_make_tag(pc, bus, device, function);
				id = pci_conf_read(pc, tag, PCI_ID_REG);

				/* Invalid vendor ID value? */
				if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
					continue;
				/*
				 * XXX Not invalid, but we've done this
				 * ~forever.
				 */
				if (PCI_VENDOR(id) == 0)
					continue;
				(*func)(pc, tag, context);
			}
		}
	}
}

/*
 * Invoke `func' for every PCI-PCI or CardBus bridge found on buses
 * minbus through maxbus.
 */
void
pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
{
	struct pci_bridge_hook_arg bridge_hook;

	bridge_hook.func = func;
	bridge_hook.arg = ctx;

	pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
	    &bridge_hook);
}

/*
 * Per-device filter used by pci_bridge_foreach(): forward only
 * PCI-PCI and CardBus bridges to the user callback.
 */
static void
pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
	pcireg_t reg;

	reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
	if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
	    (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
	     PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
		(*bridge_hook->func)(pc, tag, bridge_hook->arg);
	}
}

/*
 * Map a PCI_OVERRIDE_* bit to the corresponding function pointer in
 * `ov', or NULL for an unknown bit; used to validate override sets.
 */
static const void *
bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
{
	switch (bit) {
	case PCI_OVERRIDE_CONF_READ:
		return ov->ov_conf_read;
	case PCI_OVERRIDE_CONF_WRITE:
		return ov->ov_conf_write;
	case PCI_OVERRIDE_INTR_MAP:
		return ov->ov_intr_map;
	case PCI_OVERRIDE_INTR_STRING:
		return ov->ov_intr_string;
	case PCI_OVERRIDE_INTR_EVCNT:
		return ov->ov_intr_evcnt;
	case PCI_OVERRIDE_INTR_ESTABLISH:
		return ov->ov_intr_establish;
	case PCI_OVERRIDE_INTR_DISESTABLISH:
		return ov->ov_intr_disestablish;
	case PCI_OVERRIDE_MAKE_TAG:
		return ov->ov_make_tag;
	case PCI_OVERRIDE_DECOMPOSE_TAG:
		return ov->ov_decompose_tag;
	default:
		return NULL;
	}
}

/*
 * Free a chipset tag allocated by pci_chipset_tag_create().
 */
void
pci_chipset_tag_destroy(pci_chipset_tag_t pc)
{
	kmem_free(pc, sizeof(struct pci_chipset_tag));
}

/*
 * Create a chipset tag layered over `opc' that overrides the
 * operations named in `present' with those from `ov'.  Every bit set
 * in `present' must have a non-NULL function pointer, else EINVAL.
 * Returns 0 and the new tag in *pcp on success.
 */
int
pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
    const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
{
	uint64_t bit, bits, nbits;
	pci_chipset_tag_t pc;
	const void *fp;

	if (ov == NULL || present == 0)
		return EINVAL;

	pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);
	pc->pc_super = opc;

	/* Walk the set bits of `present', isolating one bit per pass. */
	for (bits = present; bits != 0; bits = nbits) {
		nbits = bits & (bits - 1);
		bit = nbits ^ bits;
		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
#ifdef DEBUG
			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
#endif
			goto einval;
		}
	}

	pc->pc_ov = ov;
	pc->pc_present = present;
	pc->pc_ctx = ctx;

	*pcp = pc;

	return 0;
einval:
	kmem_free(pc, sizeof(struct pci_chipset_tag));
	return EINVAL;
}

/*
 * genfb colormap callback: program one VGA DAC palette entry.
 * The DAC takes 6-bit components, hence the >> 2.
 */
static void
x86_genfb_set_mapreg(void *opaque, int index, int r, int g, int b)
{
	outb(IO_VGA + VGA_DAC_ADDRW, index);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)r >> 2);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)g >> 2);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)b >> 2);
}

/*
 * genfb mode callback: when switching back to emulation mode,
 * re-POST the VBE mode through vga_post(4) if available.
 */
static bool
x86_genfb_setmode(struct genfb_softc *sc, int newmode)
{
#if NGENFB > 0
# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	static int curmode = WSDISPLAYIO_MODE_EMUL;
# endif

	switch (newmode) {
	case WSDISPLAYIO_MODE_EMUL:
# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
		if (curmode != newmode) {
			if (vga_posth != NULL && acpi_md_vesa_modenum != 0) {
				vga_post_set_vbe(vga_posth,
				    acpi_md_vesa_modenum);
			}
		}
# endif
		break;
	}

# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	curmode = newmode;
# endif
#endif
	return true;
}

/* genfb PMF suspend callback: nothing to save. */
static bool
x86_genfb_suspend(device_t dev, const pmf_qual_t *qual)
{
	return true;
}

/*
 * genfb PMF resume callback: optionally re-POST the VGA BIOS and
 * restore the saved palette.
 */
static bool
x86_genfb_resume(device_t dev, const pmf_qual_t *qual)
{
#if NGENFB > 0
	struct pci_genfb_softc *psc = device_private(dev);

#if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	if (vga_posth != NULL && acpi_md_vbios_reset == 2) {
		vga_post_call(vga_posth);
		if (acpi_md_vesa_modenum != 0)
			vga_post_set_vbe(vga_posth, acpi_md_vesa_modenum);
	}
#endif
	genfb_restore_palette(&psc->sc_gen);
#endif

	return true;
}

/*
 * Fill the device property dictionary with framebuffer geometry and
 * callbacks gathered from bootinfo (or, on Xen dom0, from the
 * hypervisor), for consumption by the genfb(4) console driver.
 */
static void
populate_fbinfo(device_t dev, prop_dictionary_t dict)
{
#if NWSDISPLAY > 0 && NGENFB > 0
	struct rasops_info *ri = &x86_genfb_console_screen.scr_ri;
#endif
	const void *fbptr = NULL;
	struct btinfo_framebuffer fbinfo;


#if NWSDISPLAY > 0 && NGENFB > 0 && defined(XEN) && defined(DOM0OPS)
	if ((vm_guest == VM_GUEST_XENPVH || vm_guest == VM_GUEST_XENPV) &&
	    xendomain_is_dom0())
		fbptr = xen_genfb_getbtinfo();
#endif
	if (fbptr == NULL)
		fbptr = lookup_bootinfo(BTINFO_FRAMEBUFFER);

	if (fbptr == NULL)
		return;

	memcpy(&fbinfo, fbptr, sizeof(fbinfo));

	if (fbinfo.physaddr != 0) {
		prop_dictionary_set_uint32(dict, "width", fbinfo.width);
		prop_dictionary_set_uint32(dict, "height", fbinfo.height);
		prop_dictionary_set_uint8(dict, "depth", fbinfo.depth);
		prop_dictionary_set_uint16(dict, "linebytes", fbinfo.stride);

		prop_dictionary_set_uint64(dict, "address", fbinfo.physaddr);
#if NWSDISPLAY > 0 && NGENFB > 0
		if (ri->ri_bits != NULL) {
			prop_dictionary_set_uint64(dict, "virtual_address",
			    ri->ri_hwbits != NULL ?
			    (vaddr_t)ri->ri_hworigbits :
			    (vaddr_t)ri->ri_origbits);
		}
#endif
	}
#if notyet
	prop_dictionary_set_bool(dict, "splash",
	    (fbinfo.flags & BI_FB_SPLASH) != 0);
#endif
	/* 8-bit modes need the palette callback for colormap updates. */
	if (fbinfo.depth == 8) {
		gfb_cb.gcc_cookie = NULL;
		gfb_cb.gcc_set_mapreg = x86_genfb_set_mapreg;
		prop_dictionary_set_uint64(dict, "cmap_callback",
		    (uint64_t)(uintptr_t)&gfb_cb);
	}
	if (fbinfo.physaddr != 0) {
		mode_cb.gmc_setmode = x86_genfb_setmode;
		prop_dictionary_set_uint64(dict, "mode_callback",
		    (uint64_t)(uintptr_t)&mode_cb);
	}

#if NWSDISPLAY > 0 && NGENFB > 0
	if (device_is_a(dev, "genfb")) {
		prop_dictionary_set_bool(dict, "enable_shadowfb",
		    ri->ri_hwbits != NULL);

		x86_genfb_set_console_dev(dev);
#ifdef DDB
		db_trap_callback = x86_genfb_ddb_trap_callback;
#endif
	}
#endif
}

device_t
device_pci_register(device_t dev, void *aux)
{
	device_t parent = device_parent(dev);

	device_pci_props_register(dev, aux);

	/*
	 * Handle network interfaces here, the attachment information is
	 * not available driver-independently later.
	 *
	 * For disks, there is nothing useful available at attach time.
	 */
	if (device_class(dev) == DV_IFNET) {
		struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF);
		if (bin == NULL)
			return NULL;

		/*
		 * We don't check the driver name against the device name
		 * passed by the boot ROM.  The ROM should stay usable if
		 * the driver becomes obsolete.  The physical attachment
		 * information (checked below) must be sufficient to
		 * identify the device.
1206 */ 1207 if (bin->bus == BI_BUS_PCI && device_is_a(parent, "pci")) { 1208 struct pci_attach_args *paa = aux; 1209 int b, d, f; 1210 1211 /* 1212 * Calculate BIOS representation of: 1213 * 1214 * <bus,device,function> 1215 * 1216 * and compare. 1217 */ 1218 pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f); 1219 if (bin->addr.tag == ((b << 8) | (d << 3) | f)) 1220 return dev; 1221 1222 #ifndef XENPV 1223 /* 1224 * efiboot reports parent ppb bus/device/function. 1225 */ 1226 device_t grand = device_parent(parent); 1227 if (efi_probe() && grand && device_is_a(grand, "ppb")) { 1228 struct ppb_softc *ppb_sc = device_private(grand); 1229 pci_decompose_tag(ppb_sc->sc_pc, ppb_sc->sc_tag, 1230 &b, &d, &f); 1231 if (bin->addr.tag == ((b << 8) | (d << 3) | f)) 1232 return dev; 1233 } 1234 #endif 1235 } 1236 } 1237 if (parent && device_is_a(parent, "pci") && 1238 x86_found_console == false) { 1239 struct pci_attach_args *pa = aux; 1240 1241 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY) { 1242 prop_dictionary_t dict = device_properties(dev); 1243 /* 1244 * framebuffer drivers other than genfb can work 1245 * without the address property 1246 */ 1247 populate_fbinfo(dev, dict); 1248 1249 /* 1250 * If the bootloader requested console=pc and 1251 * specified a framebuffer, and if 1252 * x86_genfb_cnattach succeeded in setting it 1253 * up during consinit, then consinit will call 1254 * genfb_cnattach which makes genfb_is_console 1255 * return true. In this case, if it's the 1256 * first genfb we've seen, we will instruct the 1257 * genfb driver via the is_console property 1258 * that it has been selected as the console. 1259 * 1260 * If not all of that happened, then consinit 1261 * can't have selected a genfb console, so this 1262 * device is definitely not the console. 1263 * 1264 * XXX What happens if there's more than one 1265 * PCI display device, and the bootloader picks 1266 * the second one's framebuffer as the console 1267 * framebuffer address? 
Tough...but this has 1268 * probably never worked. 1269 */ 1270 #if NGENFB > 0 1271 prop_dictionary_set_bool(dict, "is_console", 1272 genfb_is_console()); 1273 #else 1274 prop_dictionary_set_bool(dict, "is_console", 1275 true); 1276 #endif 1277 1278 prop_dictionary_set_bool(dict, "clear-screen", false); 1279 #if NWSDISPLAY > 0 && NGENFB > 0 1280 prop_dictionary_set_uint16(dict, "cursor-row", 1281 x86_genfb_console_screen.scr_ri.ri_crow); 1282 #endif 1283 #if notyet 1284 prop_dictionary_set_bool(dict, "splash", 1285 (fbinfo->flags & BI_FB_SPLASH) != 0); 1286 #endif 1287 pmf_cb.gpc_suspend = x86_genfb_suspend; 1288 pmf_cb.gpc_resume = x86_genfb_resume; 1289 prop_dictionary_set_uint64(dict, 1290 "pmf_callback", (uint64_t)(uintptr_t)&pmf_cb); 1291 #ifdef VGA_POST 1292 vga_posth = vga_post_init(pa->pa_bus, pa->pa_device, 1293 pa->pa_function); 1294 #endif 1295 x86_found_console = true; 1296 return NULL; 1297 } 1298 } 1299 return NULL; 1300 } 1301 1302 #ifndef PUC_CNBUS 1303 #define PUC_CNBUS 0 1304 #endif 1305 1306 #if NCOM > 0 1307 int 1308 cpu_puc_cnprobe(struct consdev *cn, struct pci_attach_args *pa) 1309 { 1310 pci_mode_detect(); 1311 pa->pa_iot = x86_bus_space_io; 1312 pa->pa_memt = x86_bus_space_mem; 1313 pa->pa_pc = 0; 1314 pa->pa_tag = pci_make_tag(0, PUC_CNBUS, pci_bus_maxdevs(NULL, 0) - 1, 1315 0); 1316 1317 return 0; 1318 } 1319 #endif 1320