/*	$NetBSD: pci_machdep.c,v 1.100 2025/05/08 13:57:26 riastradh Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-specific functions for PCI autoconfiguration.
 *
 * On PCs, there are two methods of generating PCI configuration cycles.
 * We try to detect the appropriate mechanism for this machine and set
 * up a few function pointers to access the correct method directly.
 *
 * The configuration method can be hard-coded in the config file by
 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode
 * as defined in section 3.6.4.1, `Generating Configuration Cycles'.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.100 2025/05/08 13:57:26 riastradh Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kmem.h>

#include <uvm/uvm_extern.h>

#include <machine/bus_private.h>

#include <machine/pio.h>
#include <machine/lock.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pccbbreg.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/ppbvar.h>
#include <dev/pci/genfb_pcivar.h>

#include <dev/wsfb/genfbvar.h>
#include <arch/x86/include/genfb_machdep.h>
#include <arch/xen/include/hypervisor.h>
#include <arch/xen/include/xen.h>
#include <dev/ic/vgareg.h>

#include "acpica.h"
#include "genfb.h"
#include "isa.h"
#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_mpbios.h"
#include "opt_puc.h"
#include "opt_vga.h"
#include "pci.h"
#include "wsdisplay.h"
#include "com.h"
#include "opt_xen.h"

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#ifdef VGA_POST
#include <x86/vga_post.h>
#endif

#include <x86/cpuvar.h>

#include <machine/autoconf.h>
#include <machine/bootinfo.h>

#ifdef MPBIOS
#include <machine/mpbiosvar.h>
#endif

#if NACPICA > 0
#include <machine/mpacpi.h>
#if !defined(NO_PCI_EXTENDED_CONFIG)
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_mcfg.h>
#endif
#endif

#include <machine/mpconfig.h>

#if NCOM > 0
#include <dev/pci/puccn.h>
#endif

#ifndef XENPV
#include <x86/efi.h>
#endif

#include "opt_pci_conf_mode.h"

#ifdef PCI_CONF_MODE
#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2)
static int pci_mode = PCI_CONF_MODE;
#else
#error Invalid PCI configuration mode.
#endif
#else
static int pci_mode = -1;
#endif

struct pci_conf_lock {
	uint32_t cl_cpuno;	/* 0: unlocked
				 * 1 + n: locked by CPU n (0 <= n)
				 */
	uint32_t cl_sel;	/* the address that's being read. */
};

static void pci_conf_unlock(struct pci_conf_lock *);
static uint32_t pci_conf_selector(pcitag_t, int);
static unsigned int pci_conf_port(pcitag_t, int);
static void pci_conf_select(uint32_t);
static void pci_conf_lock(struct pci_conf_lock *, uint32_t);
static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *);
struct pci_bridge_hook_arg {
	void (*func)(pci_chipset_tag_t, pcitag_t, void *);
	void *arg;
};

#define	PCI_MODE1_ENABLE	0x80000000UL
#define	PCI_MODE1_ADDRESS_REG	0x0cf8
#define	PCI_MODE1_DATA_REG	0x0cfc

#define	PCI_MODE2_ENABLE_REG	0x0cf8
#define	PCI_MODE2_FORWARD_REG	0x0cfa

#define _tag(b, d, f) \
	{.mode1 = PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)}
#define _qe(bus, dev, fcn, vend, prod) \
	{_tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)}
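/*
 * Host bridges with known-buggy Configuration Mechanism 1 implementations
 * that would fail the stronger probe in pci_mode_detect() below: if one of
 * these IDs is read back at the given tag, mode 1 is assumed without
 * further checks.  The first, invalid entry is a patchable placeholder for
 * adding another ID.
 */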
const struct {
	pcitag_t tag;
	pcireg_t id;
} pcim1_quirk_tbl[] = {
	_qe(0, 0, 0, PCI_VENDOR_INVALID, 0x0000),	/* patchable */
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1),
	/* XXX Triflex2 not tested */
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2),
	_qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4),
#if 0
	/* Triton needed for Connectix Virtual PC */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
	/* Connectix Virtual PC 5 has a 440BX */
	_qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
	/* Parallels Desktop for Mac */
	_qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO),
	_qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS),
	/* SIS 740 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740),
	/* SIS 741 */
	_qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741),
	/* VIA Technologies VX900 */
	_qe(0, 0, 0, PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VX900_HB)
#endif
};
#undef _tag
#undef _qe

/* arch/xen does not support MSI/MSI-X yet. */
#ifdef __HAVE_PCI_MSI_MSIX
#define PCI_QUIRK_DISABLE_MSI	1 /* Neither MSI nor MSI-X works */
#define PCI_QUIRK_DISABLE_MSIX	2 /* MSI-X does not work */
#define PCI_QUIRK_ENABLE_MSI_VM	3 /* Older chipset in a VM where MSI and MSI-X work */

#define _dme(vend, prod) \
	{ PCI_QUIRK_DISABLE_MSI, PCI_ID_CODE(vend, prod) }
#define _dmxe(vend, prod) \
	{ PCI_QUIRK_DISABLE_MSIX, PCI_ID_CODE(vend, prod) }
#define _emve(vend, prod) \
	{ PCI_QUIRK_ENABLE_MSI_VM, PCI_ID_CODE(vend, prod) }
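/*
 * Chipsets (matched by host bridge ID) with known MSI/MSI-X problems.
 * pci_attach_hook() consults this table for bus 0 and sets or clears
 * PCI_FLAGS_MSI_OKAY/PCI_FLAGS_MSIX_OKAY accordingly; the _emve() entries
 * re-enable MSI when the same old host bridge is emulated by a hypervisor
 * (QEMU, VMware).
 */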
const struct {
	int type;
	pcireg_t id;
} pci_msi_quirk_tbl[] = {
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCMC),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437MX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437VX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439HX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439TX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82440MX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_NOAGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX_AGP),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82820_MCH),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1),
	_dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB),
	_dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_PCHB),
	_dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_PCHB),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC751_SC),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC761_SC),
	_dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC762_NB),

	_emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),	/* QEMU */
	_emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),	/* VMware */
};
#undef _dme
#undef _dmxe
#undef _emve
#endif /* __HAVE_PCI_MSI_MSIX */

/*
 * PCI doesn't have any special needs; just use the generic versions
 * of these functions.
 */
struct x86_bus_dma_tag pci_bus_dma_tag = {
	._tag_needs_free	= 0,
#if defined(_LP64) || defined(PAE)
	._bounce_thresh		= PCI32_DMA_BOUNCE_THRESHOLD,
	._bounce_alloc_lo	= ISA_DMA_BOUNCE_THRESHOLD,
	._bounce_alloc_hi	= PCI32_DMA_BOUNCE_THRESHOLD,
#else
	._bounce_thresh		= 0,
	._bounce_alloc_lo	= 0,
	._bounce_alloc_hi	= 0,
#endif
	._may_bounce		= NULL,
};

#ifdef _LP64
struct x86_bus_dma_tag pci_bus_dma64_tag = {
	._tag_needs_free	= 0,
	._bounce_thresh		= 0,
	._bounce_alloc_lo	= 0,
	._bounce_alloc_hi	= 0,
	._may_bounce		= NULL,
};
#endif

static struct pci_conf_lock cl0 = {
	  .cl_cpuno = 0UL
	, .cl_sel = 0UL
};

static struct pci_conf_lock * const cl = &cl0;

static struct genfb_colormap_callback gfb_cb;
static struct genfb_pmf_callback pmf_cb;
static struct genfb_mode_callback mode_cb;
#ifdef VGA_POST
static struct vga_post *vga_posth = NULL;
#endif

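/*
 * Acquire exclusive use of the PCI configuration selector/data ports and
 * program the selector with `sel'.  The lock is re-entrant in the sense
 * that a CPU which already holds it (e.g. because an interrupt handler
 * re-enters configuration space access) only saves the previously selected
 * address; pci_conf_unlock() restores it.  The previous state is kept in
 * *ocl.
 */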
static void
pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel)
{
	uint32_t cpuno;

	KASSERT(sel != 0);

	kpreempt_disable();
	cpuno = cpu_number() + 1;
	/* If the kernel enters pci_conf_lock() through an interrupt
	 * handler, then the CPU may already hold the lock.
	 *
	 * If the CPU does not already hold the lock, spin until
	 * we can acquire it.
	 */
	if (cpuno == cl->cl_cpuno) {
		ocl->cl_cpuno = cpuno;
	} else {
#ifdef LOCKDEBUG
		u_int spins = 0;
#endif
		u_int count;
		count = SPINLOCK_BACKOFF_MIN;

		ocl->cl_cpuno = 0;

		while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins)) {
				panic("%s: cpu %" PRId32
				    " spun out waiting for cpu %" PRId32,
				    __func__, cpuno, cl->cl_cpuno);
			}
#endif
		}
	}

	/* Only one CPU can be here, so an interlocked atomic_swap(3)
	 * is not necessary.
	 *
	 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel,
	 * and applying atomic_cas_32_ni() is not an atomic operation;
	 * however, any interrupt that modifies cl->cl_sel in the middle
	 * of the operation will also restore cl->cl_sel.  So cl->cl_sel
	 * will have the same value when we apply atomic_cas_32_ni() as
	 * when we evaluated it before.
	 */
	ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel);
	pci_conf_select(sel);
}

static void
pci_conf_unlock(struct pci_conf_lock *ocl)
{
	atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel);
	pci_conf_select(ocl->cl_sel);
	if (ocl->cl_cpuno != cl->cl_cpuno)
		atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno);
	kpreempt_enable();
}

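/*
 * pci_conf_selector() computes the value written to the mode 1 address
 * register (or, for mode 2, the enable/forward byte pair) for a given tag
 * and register offset; pci_conf_port() yields the I/O port through which
 * the data is then read or written; pci_conf_select() actually programs
 * the selector.
 */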
static uint32_t
pci_conf_selector(pcitag_t tag, int reg)
{
	static const pcitag_t mode2_mask = {
		.mode2 = {
			  .enable = 0xff
			, .forward = 0xff
		}
	};

	switch (pci_mode) {
	case 1:
		return tag.mode1 | reg;
	case 2:
		return tag.mode1 & mode2_mask.mode1;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

static unsigned int
pci_conf_port(pcitag_t tag, int reg)
{
	switch (pci_mode) {
	case 1:
		return PCI_MODE1_DATA_REG;
	case 2:
		return tag.mode2.port | reg;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

static void
pci_conf_select(uint32_t sel)
{
	pcitag_t tag;

	switch (pci_mode) {
	case 1:
		outl(PCI_MODE1_ADDRESS_REG, sel);
		return;
	case 2:
		tag.mode1 = sel;
		outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable);
		if (tag.mode2.enable != 0)
			outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward);
		return;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

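/*
 * Sanity check for Configuration Mechanism 1: scan the devices on bus 0
 * and return 0 as soon as a host bridge, or a device from a vendor known
 * to make host bridges (Compaq, Intel, VIA), answers; return -1 if nothing
 * plausible is found.
 */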
static int
pci_mode_check(void)
{
	pcireg_t x;
	pcitag_t t;
	int device;
	const int maxdev = pci_bus_maxdevs(NULL, 0);

	for (device = 0; device < maxdev; device++) {
		t = pci_make_tag(NULL, 0, device, 0);
		x = pci_conf_read(NULL, t, PCI_CLASS_REG);
		if (PCI_CLASS(x) == PCI_CLASS_BRIDGE &&
		    PCI_SUBCLASS(x) == PCI_SUBCLASS_BRIDGE_HOST)
			return 0;
		x = pci_conf_read(NULL, t, PCI_ID_REG);
		switch (PCI_VENDOR(x)) {
		case PCI_VENDOR_COMPAQ:
		case PCI_VENDOR_INTEL:
		case PCI_VENDOR_VIATECH:
			return 0;
		}
	}
	return -1;
}

#ifdef __HAVE_PCI_MSI_MSIX
static int
pci_has_msi_quirk(pcireg_t id, int type)
{
	int i;

	for (i = 0; i < __arraycount(pci_msi_quirk_tbl); i++) {
		if (id == pci_msi_quirk_tbl[i].id &&
		    type == pci_msi_quirk_tbl[i].type)
			return 1;
	}

	return 0;
}
#endif

void
pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
{
#ifdef __HAVE_PCI_MSI_MSIX
	pci_chipset_tag_t pc = pba->pba_pc;
	pcitag_t tag;
	pcireg_t id, class;
	int device, function;
	bool havehb = false;
#endif

	if (pba->pba_bus == 0)
		aprint_normal(": configuration mode %d", pci_mode);
#ifdef MPBIOS
	mpbios_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0
	mpacpi_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
	acpimcfg_map_bus(self, pba->pba_pc, pba->pba_bus);
#endif

#ifdef __HAVE_PCI_MSI_MSIX
	/*
	 * In order to decide whether the system supports MSI we look
	 * at the host bridge, which should be on bus 0.  It is better
	 * to leave MSI disabled on a system that supports it than to
	 * enable it on one that does not, so be conservative here.
	 * Thus we don't enable MSI if we don't find a host bridge
	 * there.  We also deliberately don't enable MSI on chipsets
	 * from low-end manufacturers like VIA and SiS.
	 */
	for (device = 0; device < pci_bus_maxdevs(pc, 0); device++) {
		for (function = 0; function <= 7; function++) {
			tag = pci_make_tag(pc, 0, device, function);
			id = pci_conf_read(pc, tag, PCI_ID_REG);
			class = pci_conf_read(pc, tag, PCI_CLASS_REG);

			if (PCI_CLASS(class) == PCI_CLASS_BRIDGE &&
			    PCI_SUBCLASS(class) == PCI_SUBCLASS_BRIDGE_HOST) {
				havehb = true;
				goto donehb;
			}
		}
	}
donehb:

	if (havehb == false)
		return;

	/* VMware and KVM use old chipsets, but they can use MSI/MSI-X */
	if ((cpu_feature[1] & CPUID2_RAZ)
	    && (pci_has_msi_quirk(id, PCI_QUIRK_ENABLE_MSI_VM))) {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
	} else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSI)) {
		pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "This pci host supports neither MSI nor MSI-X.");
	} else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSIX)) {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "This pci host does not support MSI-X.");
#if NACPICA > 0
	} else if (acpi_active &&
	    AcpiGbl_FADT.Header.Revision >= 4 &&
	    (AcpiGbl_FADT.BootFlags & ACPI_FADT_NO_MSI) != 0) {
		pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
		pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		aprint_verbose("\n");
		aprint_verbose_dev(self,
		    "MSI support disabled via ACPI IAPC_BOOT_ARCH flag.\n");
#endif
	} else {
		pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
		pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
	}

	/*
	 * Don't enable MSI on a HyperTransport bus.  In order to
	 * determine that bus 0 is a HyperTransport bus, we look at
	 * device 24 function 0, which is the HyperTransport
	 * host/primary interface integrated on most 64-bit AMD CPUs.
	 * If that device has a HyperTransport capability, bus 0 must
	 * be a HyperTransport bus and we disable MSI.
	 */
	if (24 < pci_bus_maxdevs(pc, 0)) {
		tag = pci_make_tag(pc, 0, 24, 0);
		if (pci_get_capability(pc, tag, PCI_CAP_LDT, NULL, NULL)) {
			pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
			pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
		}
	}

#endif /* __HAVE_PCI_MSI_MSIX */
}

int
pci_bus_maxdevs(pci_chipset_tag_t pc, int busno)
{
	/*
	 * Bus number is irrelevant.  If Configuration Mechanism 2 is in
	 * use, can only have devices 0-15 on any bus.  If Configuration
	 * Mechanism 1 is in use, can have devices 0-31 (i.e. the `normal'
	 * range).
	 */
	if (pci_mode == 2)
		return (16);
	else
		return (32);
}

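/*
 * Build a configuration tag for <bus, device, function>.  A chipset tag
 * created by pci_chipset_tag_create() may override this; otherwise the tag
 * encodes the mode 1 address-register value or the mode 2 port/enable/
 * forward triple directly.
 */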
pcitag_t
pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function)
{
	pci_chipset_tag_t ipc;
	pcitag_t tag;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0)
			continue;
		return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx,
		    pc, bus, device, function);
	}

	switch (pci_mode) {
	case 1:
		if (bus >= 256 || device >= 32 || function >= 8)
			panic("%s: bad request(%d, %d, %d)", __func__,
			    bus, device, function);

		tag.mode1 = PCI_MODE1_ENABLE |
		    (bus << 16) | (device << 11) | (function << 8);
		return tag;
	case 2:
		if (bus >= 256 || device >= 16 || function >= 8)
			panic("%s: bad request(%d, %d, %d)", __func__,
			    bus, device, function);

		tag.mode2.port = 0xc000 | (device << 8);
		tag.mode2.enable = 0xf0 | (function << 1);
		tag.mode2.forward = bus;
		return tag;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

void
pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag,
    int *bp, int *dp, int *fp)
{
	pci_chipset_tag_t ipc;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0)
			continue;
		(*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx,
		    pc, tag, bp, dp, fp);
		return;
	}

	switch (pci_mode) {
	case 1:
		if (bp != NULL)
			*bp = (tag.mode1 >> 16) & 0xff;
		if (dp != NULL)
			*dp = (tag.mode1 >> 11) & 0x1f;
		if (fp != NULL)
			*fp = (tag.mode1 >> 8) & 0x7;
		return;
	case 2:
		if (bp != NULL)
			*bp = tag.mode2.forward & 0xff;
		if (dp != NULL)
			*dp = (tag.mode2.port >> 8) & 0xf;
		if (fp != NULL)
			*fp = (tag.mode2.enable >> 1) & 0x7;
		return;
	default:
		panic("%s: mode %d not configured", __func__, pci_mode);
	}
}

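/*
 * Read a configuration register.  Overrides are honoured first; plain
 * mode 1/2 accesses go through the locked selector/data ports.  Offsets
 * beyond the standard 256-byte header are serviced via the ACPI MCFG
 * mapping when it is available, and read as all-ones otherwise.
 */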
pcireg_t
pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	pci_chipset_tag_t ipc;
	pcireg_t data;
	struct pci_conf_lock ocl;
	int dev;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0)
			continue;
		return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg);
	}

	pci_decompose_tag(pc, tag, NULL, &dev, NULL);
	if (__predict_false(pci_mode == 2 && dev >= 16))
		return (pcireg_t) -1;

	if (reg < 0)
		return (pcireg_t) -1;
	if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
		if (reg >= PCI_EXTCONF_SIZE)
			return (pcireg_t) -1;
		acpimcfg_conf_read(pc, tag, reg, &data);
		return data;
#else
		return (pcireg_t) -1;
#endif
	}

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	data = inl(pci_conf_port(tag, reg));
	pci_conf_unlock(&ocl);
	return data;
}

void
pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;
	int dev;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		(*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg,
		    data);
		return;
	}

	pci_decompose_tag(pc, tag, NULL, &dev, NULL);
	if (__predict_false(pci_mode == 2 && dev >= 16)) {
		return;
	}

	if (reg < 0)
		return;
	if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
		if (reg >= PCI_EXTCONF_SIZE)
			return;
		acpimcfg_conf_write(pc, tag, reg, data);
#endif
		return;
	}

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	outl(pci_conf_port(tag, reg), data);
	pci_conf_unlock(&ocl);
}

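/*
 * 16-bit configuration write, currently compiled for XENPV kernels only.
 * Chipset overrides and the extended (MCFG) configuration space are not
 * handled here; otherwise the value is written directly through the
 * configuration data port at the sub-register offset.
 */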
#ifdef XENPV
void
pci_conf_write16(pci_chipset_tag_t pc, pcitag_t tag, int reg, uint16_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;
	int dev;

	KASSERT((reg & 0x1) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		panic("pci_conf_write16 and override");
	}

	pci_decompose_tag(pc, tag, NULL, &dev, NULL);
	if (__predict_false(pci_mode == 2 && dev >= 16)) {
		return;
	}

	if (reg < 0)
		return;
	if (reg >= PCI_CONF_SIZE) {
#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
		if (reg >= PCI_EXTCONF_SIZE)
			return;
		panic("pci_conf_write16 and reg >= PCI_CONF_SIZE");
#endif
		return;
	}

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg & ~0x3));
	outl(pci_conf_port(tag, reg & ~0x3) + (reg & 0x3), data);
	pci_conf_unlock(&ocl);
}
#endif /* XENPV */

void
pci_mode_set(int mode)
{
	KASSERT(pci_mode == -1 || pci_mode == mode);

	pci_mode = mode;
}

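/*
 * Determine which configuration mechanism the host bridge implements.
 * The order tried is: a hard-coded or previously set mode, the mode 1
 * quirk table above, a "strong" probe of the mode 1 address register
 * (falling back to a scan for a recognizable host bridge), and finally a
 * weak mode 2 probe.  The result is cached in pci_mode; 0 means no
 * working mechanism was found.
 */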
int
pci_mode_detect(void)
{
	uint32_t sav, val;
	int i;
	pcireg_t idreg;

	if (pci_mode != -1)
		return pci_mode;

	/*
	 * We try to divine which configuration mode the host bridge wants.
	 */

	sav = inl(PCI_MODE1_ADDRESS_REG);

	pci_mode = 1; /* assume this for now */
	/*
	 * catch some known buggy implementations of mode 1
	 */
	for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
		pcitag_t t;

		if (PCI_VENDOR(pcim1_quirk_tbl[i].id) == PCI_VENDOR_INVALID)
			continue;
		t.mode1 = pcim1_quirk_tbl[i].tag.mode1;
		idreg = pci_conf_read(NULL, t, PCI_ID_REG); /* needs "pci_mode" */
		if (idreg == pcim1_quirk_tbl[i].id) {
#ifdef DEBUG
			printf("%s: known mode 1 PCI chipset (%08x)\n",
			    __func__, idreg);
#endif
			return (pci_mode);
		}
	}

#if 0
	extern char cpu_brand_string[];
	const char *reason, *system_vendor, *system_product;
	if (memcmp(cpu_brand_string, "QEMU", 4) == 0)
		/* PR 45671, https://bugs.launchpad.net/qemu/+bug/897771 */
		reason = "QEMU";
	else if ((system_vendor = pmf_get_platform("system-vendor")) != NULL &&
	    strcmp(system_vendor, "Xen") == 0 &&
	    (system_product = pmf_get_platform("system-product")) != NULL &&
	    strcmp(system_product, "HVM domU") == 0)
		reason = "Xen";
	else
		reason = NULL;

	if (reason) {
#ifdef DEBUG
		printf("%s: forcing PCI mode 1 for %s\n", __func__, reason);
#endif
		return (pci_mode);
	}
#endif
	/*
	 * Strong check for standard compliant mode 1:
	 * 1. bit 31 ("enable") can be set
	 * 2. byte/word access does not affect register
	 */
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
	outb(PCI_MODE1_ADDRESS_REG + 3, 0);
	outw(PCI_MODE1_ADDRESS_REG + 2, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
#ifdef DEBUG
		printf("%s: mode 1 enable failed (%x)\n", __func__, val);
#endif
		/* Try out mode 1 to see if we can find a host bridge. */
		if (pci_mode_check() == 0) {
#ifdef DEBUG
			printf("%s: mode 1 functional, using\n", __func__);
#endif
			return (pci_mode);
		}
		goto not1;
	}
	outl(PCI_MODE1_ADDRESS_REG, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != 0)
		goto not1;
	return (pci_mode);
not1:
	outl(PCI_MODE1_ADDRESS_REG, sav);

	/*
	 * This mode 2 check is quite weak (and known to give false
	 * positives on some Compaq machines).
	 * However, this doesn't matter, because this is the
	 * last test, and simply no PCI devices will be found if
	 * this happens.
	 */
	outb(PCI_MODE2_ENABLE_REG, 0);
	outb(PCI_MODE2_FORWARD_REG, 0);
	if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
	    inb(PCI_MODE2_FORWARD_REG) != 0)
		goto not2;
	return (pci_mode = 2);
not2:

	return (pci_mode = 0);
}

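/*
 * Walk every function of every device on buses 0..maxbus (or
 * minbus..maxbus for the _min variant) and invoke `func' on it.  Functions
 * past 0 are only scanned on multi-function devices, or when a quirk entry
 * marks the device as multi-function despite its header type.
 */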
void
pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	pci_device_foreach_min(pc, 0, maxbus, func, context);
}

void
pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	const struct pci_quirkdata *qd;
	int bus, device, function, maxdevs, nfuncs;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (bus = minbus; bus <= maxbus; bus++) {
		maxdevs = pci_bus_maxdevs(pc, bus);
		for (device = 0; device < maxdevs; device++) {
			tag = pci_make_tag(pc, bus, device, 0);
			id = pci_conf_read(pc, tag, PCI_ID_REG);

			/* Invalid vendor ID value? */
			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
				continue;
			/* XXX Not invalid, but we've done this ~forever. */
			if (PCI_VENDOR(id) == 0)
				continue;

			qd = pci_lookup_quirkdata(PCI_VENDOR(id),
			    PCI_PRODUCT(id));

			bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
			    (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
				nfuncs = 8;
			else
				nfuncs = 1;

			for (function = 0; function < nfuncs; function++) {
				tag = pci_make_tag(pc, bus, device, function);
				id = pci_conf_read(pc, tag, PCI_ID_REG);

				/* Invalid vendor ID value? */
				if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
					continue;
				/*
				 * XXX Not invalid, but we've done this
				 * ~forever.
				 */
				if (PCI_VENDOR(id) == 0)
					continue;
				(*func)(pc, tag, context);
			}
		}
	}
}

void
pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
{
	struct pci_bridge_hook_arg bridge_hook;

	bridge_hook.func = func;
	bridge_hook.arg = ctx;

	pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
	    &bridge_hook);
}

static void
pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
	pcireg_t reg;

	reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
	if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
	    (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
	    PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
		(*bridge_hook->func)(pc, tag, bridge_hook->arg);
	}
}

static const void *
bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
{
	switch (bit) {
	case PCI_OVERRIDE_CONF_READ:
		return ov->ov_conf_read;
	case PCI_OVERRIDE_CONF_WRITE:
		return ov->ov_conf_write;
	case PCI_OVERRIDE_INTR_MAP:
		return ov->ov_intr_map;
	case PCI_OVERRIDE_INTR_STRING:
		return ov->ov_intr_string;
	case PCI_OVERRIDE_INTR_EVCNT:
		return ov->ov_intr_evcnt;
	case PCI_OVERRIDE_INTR_ESTABLISH:
		return ov->ov_intr_establish;
	case PCI_OVERRIDE_INTR_DISESTABLISH:
		return ov->ov_intr_disestablish;
	case PCI_OVERRIDE_MAKE_TAG:
		return ov->ov_make_tag;
	case PCI_OVERRIDE_DECOMPOSE_TAG:
		return ov->ov_decompose_tag;
	default:
		return NULL;
	}
}

void
pci_chipset_tag_destroy(pci_chipset_tag_t pc)
{
	kmem_free(pc, sizeof(struct pci_chipset_tag));
}

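/*
 * Create a chipset tag layered above `opc' in which the operations named
 * by the bits in `present' are redirected to the matching members of `ov';
 * every named operation must be non-NULL.  The accessors above
 * (pci_make_tag(), pci_conf_read(), ...) walk the pc_super chain and use
 * the first override they find.  A sketch of the intended use, assuming a
 * caller-supplied my_conf_read() and context pointer my_ctx:
 *
 *	static const struct pci_overrides my_ov = {
 *		.ov_conf_read = my_conf_read,
 *	};
 *	pci_chipset_tag_t pc;
 *	int error = pci_chipset_tag_create(pa->pa_pc,
 *	    PCI_OVERRIDE_CONF_READ, &my_ov, my_ctx, &pc);
 *
 * Release the tag with pci_chipset_tag_destroy().
 */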
int
pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
    const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
{
	uint64_t bit, bits, nbits;
	pci_chipset_tag_t pc;
	const void *fp;

	if (ov == NULL || present == 0)
		return EINVAL;

	pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);
	pc->pc_super = opc;

	for (bits = present; bits != 0; bits = nbits) {
		nbits = bits & (bits - 1);
		bit = nbits ^ bits;
		if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
#ifdef DEBUG
			printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
#endif
			goto einval;
		}
	}

	pc->pc_ov = ov;
	pc->pc_present = present;
	pc->pc_ctx = ctx;

	*pcp = pc;

	return 0;
einval:
	kmem_free(pc, sizeof(struct pci_chipset_tag));
	return EINVAL;
}

static void
x86_genfb_set_mapreg(void *opaque, int index, int r, int g, int b)
{
	outb(IO_VGA + VGA_DAC_ADDRW, index);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)r >> 2);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)g >> 2);
	outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)b >> 2);
}

static bool
x86_genfb_setmode(struct genfb_softc *sc, int newmode)
{
#if NGENFB > 0
# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	static int curmode = WSDISPLAYIO_MODE_EMUL;
# endif

	switch (newmode) {
	case WSDISPLAYIO_MODE_EMUL:
# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
		if (curmode != newmode) {
			if (vga_posth != NULL && acpi_md_vesa_modenum != 0) {
				vga_post_set_vbe(vga_posth,
				    acpi_md_vesa_modenum);
			}
		}
# endif
		break;
	}

# if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	curmode = newmode;
# endif
#endif
	return true;
}

static bool
x86_genfb_suspend(device_t dev, const pmf_qual_t *qual)
{
	return true;
}

static bool
x86_genfb_resume(device_t dev, const pmf_qual_t *qual)
{
#if NGENFB > 0
	struct pci_genfb_softc *psc = device_private(dev);

#if NACPICA > 0 && defined(VGA_POST) && !defined(XENPV)
	if (vga_posth != NULL && acpi_md_vbios_reset == 2) {
		vga_post_call(vga_posth);
		if (acpi_md_vesa_modenum != 0)
			vga_post_set_vbe(vga_posth, acpi_md_vesa_modenum);
	}
#endif
	genfb_restore_palette(&psc->sc_gen);
#endif

	return true;
}

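/*
 * Copy the framebuffer parameters handed over by the bootloader (or, for a
 * Xen dom0, by the hypervisor) into the device properties that genfb and
 * the other framebuffer drivers look at: geometry, depth, physical and
 * virtual addresses, and the colormap/mode callbacks.
 */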
static void
populate_fbinfo(device_t dev, prop_dictionary_t dict)
{
#if NWSDISPLAY > 0 && NGENFB > 0
	struct rasops_info *ri = &x86_genfb_console_screen.scr_ri;
#endif
	const void *fbptr = NULL;
	struct btinfo_framebuffer fbinfo;

#if NWSDISPLAY > 0 && NGENFB > 0 && defined(XEN) && defined(DOM0OPS)
	if ((vm_guest == VM_GUEST_XENPVH || vm_guest == VM_GUEST_XENPV) &&
	    xendomain_is_dom0())
		fbptr = xen_genfb_getbtinfo();
#endif
	if (fbptr == NULL)
		fbptr = lookup_bootinfo(BTINFO_FRAMEBUFFER);

	if (fbptr == NULL)
		return;

	memcpy(&fbinfo, fbptr, sizeof(fbinfo));

	if (fbinfo.physaddr != 0) {
		prop_dictionary_set_uint32(dict, "width", fbinfo.width);
		prop_dictionary_set_uint32(dict, "height", fbinfo.height);
		prop_dictionary_set_uint8(dict, "depth", fbinfo.depth);
		prop_dictionary_set_uint16(dict, "linebytes", fbinfo.stride);

		prop_dictionary_set_uint64(dict, "address", fbinfo.physaddr);
#if NWSDISPLAY > 0 && NGENFB > 0
		if (ri->ri_bits != NULL) {
			prop_dictionary_set_uint64(dict, "virtual_address",
			    ri->ri_hwbits != NULL ?
			    (vaddr_t)ri->ri_hworigbits :
			    (vaddr_t)ri->ri_origbits);
		}
#endif
	}
#if notyet
	prop_dictionary_set_bool(dict, "splash",
	    (fbinfo.flags & BI_FB_SPLASH) != 0);
#endif
	if (fbinfo.depth == 8) {
		gfb_cb.gcc_cookie = NULL;
		gfb_cb.gcc_set_mapreg = x86_genfb_set_mapreg;
		prop_dictionary_set_uint64(dict, "cmap_callback",
		    (uint64_t)(uintptr_t)&gfb_cb);
	}
	if (fbinfo.physaddr != 0) {
		mode_cb.gmc_setmode = x86_genfb_setmode;
		prop_dictionary_set_uint64(dict, "mode_callback",
		    (uint64_t)(uintptr_t)&mode_cb);
	}

#if NWSDISPLAY > 0 && NGENFB > 0
	if (device_is_a(dev, "genfb")) {
		prop_dictionary_set_bool(dict, "enable_shadowfb",
		    ri->ri_hwbits != NULL);

		x86_genfb_set_console_dev(dev);
#ifdef DDB
		db_trap_callback = x86_genfb_ddb_trap_callback;
#endif
	}
#endif
}

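/*
 * MD autoconfiguration hook for PCI devices: match a network boot device
 * against the bootinfo record handed over by the bootloader, and attach
 * the console/framebuffer properties to the first PCI display device
 * found.  Returns the device to record as the boot device, or NULL.
 */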
device_t
device_pci_register(device_t dev, void *aux)
{
	device_t parent = device_parent(dev);

	device_pci_props_register(dev, aux);

	/*
	 * Handle network interfaces here, the attachment information is
	 * not available driver-independently later.
	 *
	 * For disks, there is nothing useful available at attach time.
	 */
	if (device_class(dev) == DV_IFNET) {
		struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF);
		if (bin == NULL)
			return NULL;

		/*
		 * We don't check the driver name against the device name
		 * passed by the boot ROM.  The ROM should stay usable if
		 * the driver becomes obsolete.  The physical attachment
		 * information (checked below) must be sufficient to
		 * identify the device.
		 */
		if (bin->bus == BI_BUS_PCI && device_is_a(parent, "pci")) {
			struct pci_attach_args *paa = aux;
			int b, d, f;

			/*
			 * Calculate BIOS representation of:
			 *
			 *	<bus,device,function>
			 *
			 * and compare.
			 */
			pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f);
			if (bin->addr.tag == ((b << 8) | (d << 3) | f))
				return dev;

#ifndef XENPV
			/*
			 * efiboot reports parent ppb bus/device/function.
			 */
			device_t grand = device_parent(parent);
			if (efi_probe() && grand && device_is_a(grand, "ppb")) {
				struct ppb_softc *ppb_sc = device_private(grand);
				pci_decompose_tag(ppb_sc->sc_pc, ppb_sc->sc_tag,
				    &b, &d, &f);
				if (bin->addr.tag == ((b << 8) | (d << 3) | f))
					return dev;
			}
#endif
		}
	}
	if (parent && device_is_a(parent, "pci") &&
	    x86_found_console == false) {
		struct pci_attach_args *pa = aux;

		if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY) {
			prop_dictionary_t dict = device_properties(dev);
			/*
			 * framebuffer drivers other than genfb can work
			 * without the address property
			 */
			populate_fbinfo(dev, dict);

			/*
			 * If the bootloader requested console=pc and
			 * specified a framebuffer, and if
			 * x86_genfb_cnattach succeeded in setting it
			 * up during consinit, then consinit will call
			 * genfb_cnattach which makes genfb_is_console
			 * return true.  In this case, if it's the
			 * first genfb we've seen, we will instruct the
			 * genfb driver via the is_console property
			 * that it has been selected as the console.
			 *
			 * If not all of that happened, then consinit
			 * can't have selected a genfb console, so this
			 * device is definitely not the console.
			 *
			 * XXX What happens if there's more than one
			 * PCI display device, and the bootloader picks
			 * the second one's framebuffer as the console
			 * framebuffer address?  Tough...but this has
			 * probably never worked.
			 */
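/*
 * Early probe hook for a puc(4) console: make sure a configuration
 * mechanism has been detected and hand back bus space tags plus an initial
 * tag on bus PUC_CNBUS (default 0) for the caller to start from.
 */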
#if NGENFB > 0
			prop_dictionary_set_bool(dict, "is_console",
			    genfb_is_console());
#else
			prop_dictionary_set_bool(dict, "is_console",
			    true);
#endif

			prop_dictionary_set_bool(dict, "clear-screen", false);
#if NWSDISPLAY > 0 && NGENFB > 0
			prop_dictionary_set_uint16(dict, "cursor-row",
			    x86_genfb_console_screen.scr_ri.ri_crow);
#endif
#if notyet
			prop_dictionary_set_bool(dict, "splash",
			    (fbinfo->flags & BI_FB_SPLASH) != 0);
#endif
			pmf_cb.gpc_suspend = x86_genfb_suspend;
			pmf_cb.gpc_resume = x86_genfb_resume;
			prop_dictionary_set_uint64(dict,
			    "pmf_callback", (uint64_t)(uintptr_t)&pmf_cb);
#ifdef VGA_POST
			vga_posth = vga_post_init(pa->pa_bus, pa->pa_device,
			    pa->pa_function);
#endif
			x86_found_console = true;
			return NULL;
		}
	}
	return NULL;
}

#ifndef PUC_CNBUS
#define PUC_CNBUS 0
#endif

#if NCOM > 0
int
cpu_puc_cnprobe(struct consdev *cn, struct pci_attach_args *pa)
{
	pci_mode_detect();
	pa->pa_iot = x86_bus_space_io;
	pa->pa_memt = x86_bus_space_mem;
	pa->pa_pc = 0;
	pa->pa_tag = pci_make_tag(0, PUC_CNBUS, pci_bus_maxdevs(NULL, 0) - 1,
	    0);

	return 0;
}
#endif