      1 /*	$NetBSD: linux_pci.c,v 1.24 2022/09/20 23:01:42 mrg Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifdef _KERNEL_OPT
     33 #include "acpica.h"
     34 #include "opt_pci.h"
     35 #endif
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.24 2022/09/20 23:01:42 mrg Exp $");
     39 
     40 #if NACPICA > 0
     41 #include <dev/acpi/acpivar.h>
     42 #include <dev/acpi/acpi_pci.h>
     43 #endif
     44 
     45 #include <linux/pci.h>
     46 
     47 #include <drm/drm_agp_netbsd.h>
     48 
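        /*
         * This file emulates a small subset of the Linux PCI API on top of
         * NetBSD's pci(9), bus_space(9), and kmem(9) interfaces, for use by
         * the drm2 (Linux DRM/KMS) code.  struct pci_dev is the NetBSD shim
         * type: it carries the pci_attach_args plus the Linux-visible fields.
         */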
     49 device_t
     50 pci_dev_dev(struct pci_dev *pdev)
     51 {
     52 
     53 	return pdev->pd_dev;
     54 }
     55 
     56 void
     57 pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
     58 {
     59 	pdev->pd_drvdata = drvdata;
     60 }
     61 
     62 void *
     63 pci_get_drvdata(struct pci_dev *pdev)
     64 {
     65 	return pdev->pd_drvdata;
     66 }
     67 
     68 const char *
     69 pci_name(struct pci_dev *pdev)
     70 {
     71 
     72 	/* XXX Linux wants "dddd:bb:dd.f"; this is the autoconf name. */
     73 	return device_xname(pci_dev_dev(pdev));
     74 }
     75 
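        /*
         * Initialize *pdev, the Linux-style view of the device described by
         * *pa: allocate a minimal struct pci_bus, record the BARs in
         * pd_resources[], and remember the NBPCI_KLUDGE_* flags so that
         * pci_dev_put() and linux_pci_dev_destroy() know what to undo later.
         */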
     76 void
     77 linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
     78     const struct pci_attach_args *pa, int kludges)
     79 {
     80 	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
     81 	    PCI_SUBSYS_ID_REG);
     82 	unsigned i;
     83 
     84 	memset(pdev, 0, sizeof(*pdev)); /* paranoia */
     85 
     86 	pdev->pd_pa = *pa;
     87 	pdev->pd_kludges = kludges;
     88 	pdev->pd_rom_vaddr = NULL;
     89 	pdev->pd_dev = dev;
     90 #if (NACPICA > 0)
     91 	const int seg = pci_get_segment(pa->pa_pc);
     92 	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
     93 	    pa->pa_device, pa->pa_function);
     94 #else
     95 	pdev->pd_ad = NULL;
     96 #endif
     97 	pdev->pd_saved_state = NULL;
     98 	pdev->pd_intr_handles = NULL;
     99 	pdev->pd_drvdata = NULL;
    100 	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_SLEEP);
    101 	pdev->bus->pb_pc = pa->pa_pc;
    102 	pdev->bus->pb_dev = parent;
    103 	pdev->bus->number = pa->pa_bus;
    104 	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
    105 	pdev->vendor = PCI_VENDOR(pa->pa_id);
    106 	pdev->device = PCI_PRODUCT(pa->pa_id);
    107 	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
    108 	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
    109 	pdev->revision = PCI_REVISION(pa->pa_class);
    110 	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */
    111 
    112 	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
    113 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
    114 		const int reg = PCI_BAR(i);
    115 
    116 		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
    117 		    pa->pa_tag, reg);
    118 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
    119 			pdev->pd_resources[i].type,
    120 			&pdev->pd_resources[i].addr,
    121 			&pdev->pd_resources[i].size,
    122 			&pdev->pd_resources[i].flags)) {
    123 			pdev->pd_resources[i].addr = 0;
    124 			pdev->pd_resources[i].size = 0;
    125 			pdev->pd_resources[i].flags = 0;
    126 		}
    127 		pdev->pd_resources[i].kva = NULL;
    128 		pdev->pd_resources[i].mapped = false;
    129 	}
    130 }
    131 
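        /*
         * XXX Unlike Linux's pci_find_capability, which returns the offset
         * of the capability in config space, this returns the found/not-found
         * result of pci_get_capability(9); treat it as a boolean.
         */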
    132 int
    133 pci_find_capability(struct pci_dev *pdev, int cap)
    134 {
    135 
    136 	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
    137 	    NULL, NULL);
    138 }
    139 
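        /*
         * Config space accessors.  pci_conf_read(9)/pci_conf_write(9) only
         * operate on aligned 32-bit words, so the word and byte variants
         * extract from (or, for writes, read-modify-write into) the
         * containing dword.  All of them return 0: the underlying operations
         * cannot fail.
         */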
    140 int
    141 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
    142 {
    143 
    144 	KASSERT(!ISSET(reg, 3));
    145 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
    146 	return 0;
    147 }
    148 
    149 int
    150 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
    151 {
    152 
    153 	KASSERT(!ISSET(reg, 1));
    154 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    155 	    (reg &~ 2)) >> (8 * (reg & 2));
    156 	return 0;
    157 }
    158 
    159 int
    160 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
    161 {
    162 
    163 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    164 	    (reg &~ 3)) >> (8 * (reg & 3));
    165 	return 0;
    166 }
    167 
    168 int
    169 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
    170 {
    171 
    172 	KASSERT(!ISSET(reg, 3));
    173 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
    174 	return 0;
    175 }
    176 
    177 int
    178 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    179     uint32_t *valuep)
    180 {
    181 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    182 	    PCI_FUNC(devfn));
    183 
    184 	KASSERT(!ISSET(reg, 3));
    185 	*valuep = pci_conf_read(bus->pb_pc, tag, reg);
    186 	return 0;
    187 }
    188 
    189 int
    190 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    191     uint16_t *valuep)
    192 {
    193 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    194 	    PCI_FUNC(devfn));
    195 
    196 	KASSERT(!ISSET(reg, 1));
    197 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
    198 	return 0;
    199 }
    200 
    201 int
    202 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    203     uint8_t *valuep)
    204 {
    205 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    206 	    PCI_FUNC(devfn));
    207 
    208 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
    209 	return 0;
    210 }
    211 
    212 int
    213 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    214     uint32_t value)
    215 {
    216 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    217 	    PCI_FUNC(devfn));
    218 
    219 	KASSERT(!ISSET(reg, 3));
    220 	pci_conf_write(bus->pb_pc, tag, reg, value);
    221 	return 0;
    222 }
    223 
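        /*
         * Read-modify-write helper for the sub-dword config writes below:
         * merge `bytes' bytes of `value' into the 32-bit word containing
         * register `reg' and write the whole word back.
         */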
    224 static void
    225 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    226     uint32_t value)
    227 {
    228 	const uint32_t mask = ~((~0UL) << (8 * bytes));
    229 	const int reg32 = (reg &~ 3);
    230 	const unsigned int shift = (8 * (reg & 3));
    231 	uint32_t value32;
    232 
    233 	KASSERT(bytes <= 4);
    234 	KASSERT(!ISSET(value, ~mask));
    235 	value32 = pci_conf_read(pc, tag, reg32);
    236 	value32 &=~ (mask << shift);
    237 	value32 |= (value << shift);
    238 	pci_conf_write(pc, tag, reg32, value32);
    239 }
    240 
    241 int
    242 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
    243 {
    244 
    245 	KASSERT(!ISSET(reg, 1));
    246 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
    247 	return 0;
    248 }
    249 
    250 int
    251 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
    252 {
    253 
    254 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
    255 	return 0;
    256 }
    257 
    258 int
    259 pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    260     uint16_t value)
    261 {
    262 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    263 	    PCI_FUNC(devfn));
    264 
    265 	KASSERT(!ISSET(reg, 1));
    266 	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
    267 	return 0;
    268 }
    269 
    270 int
    271 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    272     uint8_t value)
    273 {
    274 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    275 	    PCI_FUNC(devfn));
    276 
    277 	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
    278 	return 0;
    279 }
    280 
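        /*
         * Allocate exactly one MSI vector with pci_msi_alloc_exact() and
         * keep the handle in pd_intr_handles for whoever establishes the
         * interrupt; pci_disable_msi() releases it again.
         */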
    281 int
    282 pci_enable_msi(struct pci_dev *pdev)
    283 {
    284 	const struct pci_attach_args *const pa = &pdev->pd_pa;
    285 
    286 	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
    287 		return -EINVAL;
    288 
    289 	pdev->msi_enabled = 1;
    290 	return 0;
    291 }
    292 
    293 void
    294 pci_disable_msi(struct pci_dev *pdev)
    295 {
    296 	const struct pci_attach_args *const pa = &pdev->pd_pa;
    297 
    298 	if (pdev->pd_intr_handles != NULL) {
    299 		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
    300 		pdev->pd_intr_handles = NULL;
    301 	}
    302 	pdev->msi_enabled = 0;
    303 }
    304 
    305 void
    306 pci_set_master(struct pci_dev *pdev)
    307 {
    308 	pcireg_t csr;
    309 
    310 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    311 	    PCI_COMMAND_STATUS_REG);
    312 	csr |= PCI_COMMAND_MASTER_ENABLE;
    313 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    314 	    PCI_COMMAND_STATUS_REG, csr);
    315 }
    316 
    317 void
    318 pci_clear_master(struct pci_dev *pdev)
    319 {
    320 	pcireg_t csr;
    321 
    322 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    323 	    PCI_COMMAND_STATUS_REG);
    324 	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
    325 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    326 	    PCI_COMMAND_STATUS_REG, csr);
    327 }
    328 
    329 bus_addr_t
    330 pcibios_align_resource(void *p, const struct resource *resource,
    331     bus_addr_t addr, bus_size_t size)
    332 {
    333 	panic("pcibios_align_resource has accessed unaligned neurons!");
    334 }
    335 
    336 int
    337 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    338     bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    339     bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
    340 	bus_size_t) __unused,
    341     struct pci_dev *pdev)
    342 {
    343 	const struct pci_attach_args *const pa = &pdev->pd_pa;
    344 	bus_space_tag_t bst;
    345 	int error;
    346 
    347 	switch (resource->flags) {
    348 	case IORESOURCE_MEM:
    349 		bst = pa->pa_memt;
    350 		break;
    351 
    352 	case IORESOURCE_IO:
    353 		bst = pa->pa_iot;
    354 		break;
    355 
    356 	default:
    357 		panic("I don't know what kind of resource you want!");
    358 	}
    359 
    360 	resource->r_bst = bst;
    361 	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
    362 	    size, align, 0, 0, &resource->start, &resource->r_bsh);
    363 	if (error)
    364 		return error;
    365 
    366 	resource->end = resource->start + (size - 1);
    367 	return 0;
    368 }
    369 
    370 /*
    371  * XXX Mega-kludgerific!  pci_get_domain_bus_and_slot and pci_get_class
    372  * are defined only for their single purposes in i915drm, in
    373  * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
    374  * generally without adapting pci_find_device (and pci_enumerate_bus
    375  * internally) to pass a cookie through.
    376  */
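
        /*
         * Illustrative sketch only (argument values inferred from the
         * KASSERTs below and the i915 callers named above):
         *
         *	bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
         *	isab = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
         *	...
         *	pci_dev_put(isab);
         *	pci_dev_put(bridge);
         */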
    377 
    378 static int
    379 pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
    380 {
    381 
    382 	/* XXX domain */
    383 	if (pa->pa_bus != 0)
    384 		return 0;
    385 	if (pa->pa_device != 0)
    386 		return 0;
    387 	if (pa->pa_function != 0)
    388 		return 0;
    389 
    390 	return 1;
    391 }
    392 
    393 struct pci_dev *
    394 pci_get_domain_bus_and_slot(int domain, int bus, int slot)
    395 {
    396 	struct pci_attach_args pa;
    397 
    398 	KASSERT(domain == 0);
    399 	KASSERT(bus == 0);
    400 	KASSERT(slot == PCI_DEVFN(0, 0));
    401 
    402 	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
    403 		return NULL;
    404 
    405 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
    406 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
    407 
    408 	return pdev;
    409 }
    410 
    411 static int
    412 pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
    413 {
    414 
    415 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
    416 		return 0;
    417 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
    418 		return 0;
    419 
    420 	return 1;
    421 }
    422 
    423 void
    424 pci_dev_put(struct pci_dev *pdev)
    425 {
    426 
    427 	if (pdev == NULL)
    428 		return;
    429 
    430 	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
    431 	kmem_free(pdev->bus, sizeof(*pdev->bus));
    432 	kmem_free(pdev, sizeof(*pdev));
    433 }
    434 
    435 struct pci_dev *		/* XXX i915 kludge */
    436 pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
    437 {
    438 	struct pci_attach_args pa;
    439 
    440 	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));
    441 
    442 	if (from != NULL) {
    443 		pci_dev_put(from);
    444 		return NULL;
    445 	}
    446 
    447 	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
    448 		return NULL;
    449 
    450 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
    451 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
    452 
    453 	return pdev;
    454 }
    455 
    456 int
    457 pci_dev_present(const struct pci_device_id *ids)
    458 {
    459 
    460 	/* XXX implement me -- pci_find_device doesn't pass a cookie */
    461 	return 0;
    462 }
    463 
    464 void
    465 pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
    466 {
    467 
    468 	/* XXX Disable the ROM address decoder.  */
    469 	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
    470 	KASSERT(vaddr == pdev->pd_rom_vaddr);
    471 	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
    472 	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
    473 	pdev->pd_rom_vaddr = NULL;
    474 }
    475 
    476 /* XXX Whattakludge!  Should move this in sys/arch/.  */
    477 static int
    478 pci_map_rom_md(struct pci_dev *pdev)
    479 {
    480 #if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
    481 	const bus_addr_t rom_base = 0xc0000;
    482 	const bus_size_t rom_size = 0x20000;
    483 	bus_space_handle_t rom_bsh;
    484 	int error;
    485 
    486 	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
    487 		return ENXIO;
    488 	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
    489 		return ENXIO;
    490 	/* XXX Check whether this is the primary VGA card?  */
    491 	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
    492 	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
    493 	if (error)
    494 		return ENXIO;
    495 
    496 	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
    497 	pdev->pd_rom_bsh = rom_bsh;
    498 	pdev->pd_rom_size = rom_size;
    499 	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;
    500 
    501 	return 0;
    502 #else
    503 	return ENXIO;
    504 #endif
    505 }
    506 
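        /*
         * Map the device's expansion ROM: try the ROM BAR via
         * pci_mapreg_map() first, then fall back to the legacy VGA ROM
         * window at 0xc0000 on x86 and ia64.  On success, return a pointer
         * to the x86 ROM image that was found and its size in *sizep;
         * otherwise return NULL.
         */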
    507 void __pci_rom_iomem *
    508 pci_map_rom(struct pci_dev *pdev, size_t *sizep)
    509 {
    510 
    511 	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
    512 
    513 	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
    514 		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
    515 		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
    516 	    != 0)
    517 		goto fail_mi;
    518 	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;
    519 
    520 	/* XXX This type is obviously wrong in general...  */
    521 	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
    522 		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
    523 		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
    524 		pci_unmap_rom(pdev, NULL);
    525 		goto fail_mi;
    526 	}
    527 	goto success;
    528 
    529 fail_mi:
    530 	if (pci_map_rom_md(pdev) != 0)
    531 		goto fail_md;
    532 
    533 	/* XXX This type is obviously wrong in general...  */
    534 	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
    535 		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
    536 		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
    537 		pci_unmap_rom(pdev, NULL);
    538 		goto fail_md;
    539 	}
    540 
    541 success:
    542 	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
    543 	*sizep = pdev->pd_rom_found_size;
    544 	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
    545 	    pdev->pd_rom_found_bsh);
    546 	return pdev->pd_rom_vaddr;
    547 
    548 fail_md:
    549 	return NULL;
    550 }
    551 
    552 void __pci_rom_iomem *
    553 pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
    554 {
    555 
    556 	*sizep = 0;
    557 	return NULL;
    558 }
    559 
    560 int
    561 pci_enable_rom(struct pci_dev *pdev)
    562 {
    563 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    564 	const pcitag_t tag = pdev->pd_pa.pa_tag;
    565 	pcireg_t addr;
    566 	int s;
    567 
    568 	/* XXX Don't do anything if the ROM isn't there.  */
    569 
    570 	s = splhigh();
    571 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
    572 	addr |= PCI_MAPREG_ROM_ENABLE;
    573 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
    574 	splx(s);
    575 
    576 	return 0;
    577 }
    578 
    579 void
    580 pci_disable_rom(struct pci_dev *pdev)
    581 {
    582 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    583 	const pcitag_t tag = pdev->pd_pa.pa_tag;
    584 	pcireg_t addr;
    585 	int s;
    586 
    587 	s = splhigh();
    588 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
    589 	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
    590 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
    591 	splx(s);
    592 }
    593 
    594 bus_addr_t
    595 pci_resource_start(struct pci_dev *pdev, unsigned i)
    596 {
    597 
    598 	KASSERT(i < PCI_NUM_RESOURCES);
    599 	return pdev->pd_resources[i].addr;
    600 }
    601 
    602 bus_size_t
    603 pci_resource_len(struct pci_dev *pdev, unsigned i)
    604 {
    605 
    606 	KASSERT(i < PCI_NUM_RESOURCES);
    607 	return pdev->pd_resources[i].size;
    608 }
    609 
    610 bus_addr_t
    611 pci_resource_end(struct pci_dev *pdev, unsigned i)
    612 {
    613 
    614 	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
    615 }
    616 
    617 int
    618 pci_resource_flags(struct pci_dev *pdev, unsigned i)
    619 {
    620 
    621 	KASSERT(i < PCI_NUM_RESOURCES);
    622 	return pdev->pd_resources[i].flags;
    623 }
    624 
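        /*
         * Map BAR `i' (memory space only) and return its kernel virtual
         * address, Linux-style.  The bus_space mapping is recorded in
         * pd_resources[i] so that pci_iounmap() and linux_pci_dev_destroy()
         * can find and unmap it later.
         */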
    625 void __pci_iomem *
    626 pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
    627 {
    628 	int error;
    629 
    630 	KASSERT(i < PCI_NUM_RESOURCES);
    631 	KASSERT(pdev->pd_resources[i].kva == NULL);
    632 
    633 	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
    634 		return NULL;
    635 	if (pdev->pd_resources[i].size < size)
    636 		return NULL;
    637 	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
    638 	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
    639 	    &pdev->pd_resources[i].bsh);
    640 	if (error)
    641 		return NULL;
    642 	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
    643 	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
    644 	    pdev->pd_resources[i].bsh);
    645 	pdev->pd_resources[i].mapped = true;
    646 
    647 	return pdev->pd_resources[i].kva;
    648 }
    649 
    650 void
    651 pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
    652 {
    653 	unsigned i;
    654 
    655 	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
    656 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
    657 		if (pdev->pd_resources[i].kva == kva)
    658 			break;
    659 	}
    660 	KASSERT(i < PCI_NUM_RESOURCES);
    661 
    662 	pdev->pd_resources[i].kva = NULL;
    663 	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
    664 	    pdev->pd_resources[i].size);
    665 }
    666 
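        /*
         * pci_save_state()/pci_restore_state() wrap pci_conf_capture() and
         * pci_conf_restore(); the capture buffer is allocated on save and
         * freed again on restore.
         */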
    667 void
    668 pci_save_state(struct pci_dev *pdev)
    669 {
    670 
    671 	KASSERT(pdev->pd_saved_state == NULL);
    672 	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
    673 	    KM_SLEEP);
    674 	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    675 	    pdev->pd_saved_state);
    676 }
    677 
    678 void
    679 pci_restore_state(struct pci_dev *pdev)
    680 {
    681 
    682 	KASSERT(pdev->pd_saved_state != NULL);
    683 	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    684 	    pdev->pd_saved_state);
    685 	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
    686 	pdev->pd_saved_state = NULL;
    687 }
    688 
    689 bool
    690 pci_is_pcie(struct pci_dev *pdev)
    691 {
    692 
    693 	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
    694 }
    695 
    696 bool
    697 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
    698 {
    699 
    700 	/* XXX Cop-out.  */
    701 	if (mask > DMA_BIT_MASK(32))
    702 		return pci_dma64_available(&pdev->pd_pa);
    703 	else
    704 		return true;
    705 }
    706 
    707 bool
    708 pci_is_thunderbolt_attached(struct pci_dev *pdev)
    709 {
    710 
    711 	/* XXX Cop-out.  */
    712 	return false;
    713 }
    714 
    715 bool
    716 pci_is_root_bus(struct pci_bus *bus)
    717 {
    718 
    719 	return bus->number == 0;
    720 }
    721 
    722 int
    723 pci_domain_nr(struct pci_bus *bus)
    724 {
    725 
    726 	return pci_get_segment(bus->pb_pc);
    727 }
    728 
    729 /*
    730  * We deliberately rename pci_enable/disable_device so that every use
    731  * has to be reviewed: NetBSD's PCI API does _not_ respect our local
    732  * enablecnt here, and other parts of NetBSD (PMF, for example) enable
    733  * and disable devices on their own, so each caller has to decide
    734  * whether calling these is appropriate.
    735  */
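
        /*
         * Illustrative sketch only: a caller that does want the Linux
         * behaviour pairs the two around the device's active lifetime,
         * along the lines of
         *
         *	error = linux_pci_enable_device(pdev);
         *	...
         *	linux_pci_disable_device(pdev);
         */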
    736 
    737 int
    738 linux_pci_enable_device(struct pci_dev *pdev)
    739 {
    740 	const struct pci_attach_args *pa = &pdev->pd_pa;
    741 	pcireg_t csr;
    742 	int s;
    743 
    744 	if (pdev->pd_enablecnt++)
    745 		return 0;
    746 
    747 	s = splhigh();
    748 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    749 	/* If someone else (firmware) already enabled it, credit them.  */
    750 	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
    751 		pdev->pd_enablecnt++;
    752 	csr |= PCI_COMMAND_IO_ENABLE;
    753 	csr |= PCI_COMMAND_MEM_ENABLE;
    754 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
    755 	splx(s);
    756 
    757 	return 0;
    758 }
    759 
    760 void
    761 linux_pci_disable_device(struct pci_dev *pdev)
    762 {
    763 	const struct pci_attach_args *pa = &pdev->pd_pa;
    764 	pcireg_t csr;
    765 	int s;
    766 
    767 	if (--pdev->pd_enablecnt)
    768 		return;
    769 
    770 	s = splhigh();
    771 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    772 	csr &= ~PCI_COMMAND_IO_ENABLE;
    773 	csr &= ~PCI_COMMAND_MEM_ENABLE;
    774 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
    775 	splx(s);
    776 }
    777 
    778 void
    779 linux_pci_dev_destroy(struct pci_dev *pdev)
    780 {
    781 	unsigned i;
    782 
    783 	if (pdev->bus != NULL) {
    784 		kmem_free(pdev->bus, sizeof(*pdev->bus));
    785 		pdev->bus = NULL;
    786 	}
    787 	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
    788 		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
    789 		pdev->pd_rom_vaddr = NULL;
    790 	}
    791 	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
    792 		if (!pdev->pd_resources[i].mapped)
    793 			continue;
    794 		bus_space_unmap(pdev->pd_resources[i].bst,
    795 		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
    796 	}
    797 
    798 	/* There is no way these should be still in use.  */
    799 	KASSERT(pdev->pd_saved_state == NULL);
    800 	KASSERT(pdev->pd_intr_handles == NULL);
    801 }
    802 
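        /*
         * Return the fastest link speed the device advertises: prefer the
         * Supported Link Speeds vector in LCAP2 (present when the PCIe
         * capability version is >= 2), otherwise fall back to the Max Link
         * Speed field in LCAP.
         */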
    803 enum pci_bus_speed
    804 pcie_get_speed_cap(struct pci_dev *dev)
    805 {
    806 	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
    807 	pcitag_t tag = dev->pd_pa.pa_tag;
    808 	pcireg_t lcap, lcap2, xcap;
    809 	int off;
    810 
    811 	/* Must have capabilities. */
    812 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    813 		return PCI_SPEED_UNKNOWN;
    814 
    815 	/* Only PCIe 3.x has LCAP2. */
    816 	xcap = pci_conf_read(pc, tag, off + PCIE_XCAP);
    817 	if (__SHIFTOUT(xcap, PCIE_XCAP_VER_MASK) >= 2) {
    818 		lcap2 = pci_conf_read(pc, tag, off + PCIE_LCAP2);
    819 		if (lcap2) {
    820 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS64) != 0) {
    821 				return PCIE_SPEED_64_0GT;
    822 			}
    823 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS32) != 0) {
    824 				return PCIE_SPEED_32_0GT;
    825 			}
    826 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS16) != 0) {
    827 				return PCIE_SPEED_16_0GT;
    828 			}
    829 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS8) != 0) {
    830 				return PCIE_SPEED_8_0GT;
    831 			}
    832 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS5) != 0) {
    833 				return PCIE_SPEED_5_0GT;
    834 			}
    835 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS2) != 0) {
    836 				return PCIE_SPEED_2_5GT;
    837 			}
    838 		}
    839 	}
    840 
    841 	lcap = pci_conf_read(pc, tag, off + PCIE_LCAP);
    842 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_64) {
    843 		return PCIE_SPEED_64_0GT;
    844 	}
    845 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_32) {
    846 		return PCIE_SPEED_32_0GT;
    847 	}
    848 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_16) {
    849 		return PCIE_SPEED_16_0GT;
    850 	}
    851 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_8) {
    852 		return PCIE_SPEED_8_0GT;
    853 	}
    854 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_5) {
    855 		return PCIE_SPEED_5_0GT;
    856 	}
    857 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_2) {
    858 		return PCIE_SPEED_2_5GT;
    859 	}
    860 
    861 	return PCI_SPEED_UNKNOWN;
    862 }
    863 
    864 /*
    865  * XXX This should walk up the PCI tree, but currently it only checks
    866  * this device.  It also never writes to *limiting_dev (the only
    867  * caller in drm2 does not use it).
    868  */
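        /*
         * The result is in Mb/s after encoding overhead; for example, a x4
         * link running at 8.0 GT/s yields 4 * (8000 * 128 / 130) = 31504
         * Mb/s.
         */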
    869 unsigned
    870 pcie_bandwidth_available(struct pci_dev *dev,
    871     struct pci_dev **limiting_dev,
    872     enum pci_bus_speed *speed,
    873     enum pcie_link_width *width)
    874 {
    875 	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
    876 	pcitag_t tag = dev->pd_pa.pa_tag;
    877 	pcireg_t lcsr;
    878 	unsigned per_line_speed, num_lanes;
    879 	int off;
    880 
    881 	/* Must have capabilities. */
    882 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    883 		return 0;
    884 
    885 	if (speed)
    886 		*speed = PCI_SPEED_UNKNOWN;
    887 	if (width)
    888 		*width = 0;
    889 
    890 	lcsr = pci_conf_read(pc, tag, off + PCIE_LCSR);
    891 
    892 	switch (lcsr & PCIE_LCSR_NLW) {
    893 	case PCIE_LCSR_NLW_X1:
    894 	case PCIE_LCSR_NLW_X2:
    895 	case PCIE_LCSR_NLW_X4:
    896 	case PCIE_LCSR_NLW_X8:
    897 	case PCIE_LCSR_NLW_X12:
    898 	case PCIE_LCSR_NLW_X16:
    899 	case PCIE_LCSR_NLW_X32:
    900 		num_lanes = __SHIFTOUT(lcsr, PCIE_LCSR_NLW);
    901 		if (width)
    902 			*width = num_lanes;
    903 		break;
    904 	default:
    905 		num_lanes = 0;
    906 		break;
    907 	}
    908 
    909 	switch (__SHIFTOUT(lcsr, PCIE_LCSR_LINKSPEED)) {
    910 	case PCIE_LCSR_LINKSPEED_2:
    911 		if (speed) *speed = PCIE_SPEED_2_5GT;
    912 		per_line_speed = 2500 * 8 / 10;
    913 		break;
    914 	case PCIE_LCSR_LINKSPEED_5:
    915 		if (speed) *speed = PCIE_SPEED_5_0GT;
    916 		per_line_speed = 5000 * 8 / 10;
    917 		break;
    918 	case PCIE_LCSR_LINKSPEED_8:
    919 		if (speed) *speed = PCIE_SPEED_8_0GT;
    920 		per_line_speed = 8000 * 128 / 130;
    921 		break;
    922 	case PCIE_LCSR_LINKSPEED_16:
    923 		if (speed) *speed = PCIE_SPEED_16_0GT;
    924 		per_line_speed = 16000 * 128 / 130;
    925 		break;
    926 	case PCIE_LCSR_LINKSPEED_32:
    927 		if (speed) *speed = PCIE_SPEED_32_0GT;
    928 		per_line_speed = 32000 * 128 / 130;
    929 		break;
    930 	case PCIE_LCSR_LINKSPEED_64:
    931 		if (speed) *speed = PCIE_SPEED_64_0GT;
    932 		per_line_speed = 64000 * 128 / 130;
    933 		break;
    934 	default:
    935 		per_line_speed = 0;
    936 	}
    937 
    938 	return num_lanes * per_line_speed;
    939 }
    940