/*	$NetBSD: pci.h,v 1.27 2018/08/27 07:03:02 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#ifdef _KERNEL_OPT
#if defined(i386) || defined(amd64)
#include "acpica.h"
#else	/* !(i386 || amd64) */
#define NACPICA	0
#endif	/* i386 || amd64 */
#endif

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/limits.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#else
struct acpi_devnode;
#endif

#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

struct pci_driver;

struct pci_bus {
	u_int		number;
};

struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	unsigned long	driver_data;
};

#define	PCI_ANY_ID		((pcireg_t)-1)

#define	PCI_BASE_CLASS_DISPLAY	PCI_CLASS_DISPLAY

#define	PCI_CLASS_DISPLAY_VGA						\
	((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
#define	PCI_CLASS_BRIDGE_ISA						\
	((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);

/* XXX This is getting silly...  */
#define	PCI_VENDOR_ID_ASUSTEK	PCI_VENDOR_ASUSTEK
#define	PCI_VENDOR_ID_ATI	PCI_VENDOR_ATI
#define	PCI_VENDOR_ID_DELL	PCI_VENDOR_DELL
#define	PCI_VENDOR_ID_IBM	PCI_VENDOR_IBM
#define	PCI_VENDOR_ID_HP	PCI_VENDOR_HP
#define	PCI_VENDOR_ID_INTEL	PCI_VENDOR_INTEL
#define	PCI_VENDOR_ID_NVIDIA	PCI_VENDOR_NVIDIA
#define	PCI_VENDOR_ID_SONY	PCI_VENDOR_SONY
#define	PCI_VENDOR_ID_VIA	PCI_VENDOR_VIATECH

#define	PCI_DEVICE_ID_ATI_RADEON_QY	PCI_PRODUCT_ATI_RADEON_RV100_QY

#define	PCI_DEVFN(DEV, FN)						\
	(__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define	PCI_SLOT(DEVFN)		__SHIFTOUT((DEVFN), __BITS(3, 7))
#define	PCI_FUNC(DEVFN)		__SHIFTOUT((DEVFN), __BITS(0, 2))
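
/*
 * These follow Linux's devfn packing: device number in bits 7:3 and
 * function number in bits 2:0.  For example, PCI_DEVFN(0x1f, 3) yields
 * 0xfb, from which PCI_SLOT recovers 0x1f and PCI_FUNC recovers 3.
 */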

#define	PCI_NUM_RESOURCES	((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define	DEVICE_COUNT_RESOURCE	PCI_NUM_RESOURCES

#define	PCI_CAP_ID_AGP	PCI_CAP_AGP

typedef int pci_power_t;

#define	PCI_D0		0
#define	PCI_D1		1
#define	PCI_D2		2
#define	PCI_D3hot	3
#define	PCI_D3cold	4

#define	__pci_iomem

struct pci_dev {
	struct pci_attach_args	pd_pa;
	int			pd_kludges;	/* Gotta lose 'em...  */
#define	NBPCI_KLUDGE_GET_MUMBLE	0x01
#define	NBPCI_KLUDGE_MAP_ROM	0x02
	bus_space_tag_t		pd_rom_bst;
	bus_space_handle_t	pd_rom_bsh;
	bus_size_t		pd_rom_size;
	bus_space_handle_t	pd_rom_found_bsh;
	bus_size_t		pd_rom_found_size;
	void			*pd_rom_vaddr;
	device_t		pd_dev;
	struct drm_device	*pd_drm_dev; /* XXX Nouveau kludge!  */
	struct {
		pcireg_t		type;
		bus_addr_t		addr;
		bus_size_t		size;
		int			flags;
		bus_space_tag_t		bst;
		bus_space_handle_t	bsh;
		void __pci_iomem	*kva;
	}			pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state	*pd_saved_state;
	struct acpi_devnode	*pd_ad;
	pci_intr_handle_t	*pd_intr_handles;

	/* Linux API only below */
	struct pci_bus		*bus;
	uint32_t		devfn;
	uint16_t		vendor;
	uint16_t		device;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	uint8_t			revision;
	uint32_t		class;
	bool			msi_enabled;
};

static inline device_t
pci_dev_dev(struct pci_dev *pdev)
{
	return pdev->pd_dev;
}

/* XXX Nouveau kludge!  */
static inline struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drm_dev;
}

static inline void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_SLEEP);
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
	}
}
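
/*
 * Usage sketch (illustrative only; the softc layout and names are
 * hypothetical): a drm front end's attach routine wraps the
 * pci_attach_args it is handed, e.g.
 *
 *	struct example_softc *const sc = device_private(self);
 *	const struct pci_attach_args *const pa = aux;
 *
 *	linux_pci_dev_init(&sc->sc_pci_dev, self, pa, 0);
 */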

static inline int
pci_find_capability(struct pci_dev *pdev, int cap)
{
	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

static inline void
pci_rmw_config(struct pci_dev *pdev, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	pci_read_config_dword(pdev, reg32, &value32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_write_config_dword(pdev, reg32, value32);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{
	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev, reg, 2, value);
	return 0;
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{
	pci_rmw_config(pdev, reg, 1, value);
	return 0;
}
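
/*
 * Usage sketch (illustrative): like their Linux counterparts, the
 * accessors above return 0 on success; sub-dword writes are performed
 * read-modify-write on the containing dword by pci_rmw_config, e.g.
 *
 *	uint16_t csr;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND_STATUS_REG, &csr);
 *	pci_write_config_word(pdev, PCI_COMMAND_STATUS_REG,
 *	    csr | PCI_COMMAND_MASTER_ENABLE);
 */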

static inline int
pci_enable_msi(struct pci_dev *pdev)
{
#ifdef notyet
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
#else
	return -ENOSYS;
#endif
}

static inline void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

static inline void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

static inline void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

#define	PCIBIOS_MIN_MEM	0x100000	/* XXX bogus x86 kludge bollocks */

static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static inline int		/* XXX inline?  */
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static inline int		/* XXX inline?  */
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

static inline void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}
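
/*
 * Usage sketch (illustrative): these kludges cover exactly the two i915
 * call patterns mentioned above; anything else trips the KASSERTs.
 *
 *	struct pci_dev *bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 *	struct pci_dev *pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
 *	...
 *	pci_dev_put(pch);
 *	pci_dev_put(bridge);
 */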

#define	__pci_rom_iomem

static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}
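
/*
 * Usage sketch (illustrative): pci_map_rom returns a pointer into the
 * mapped expansion ROM (falling back to the legacy 0xc0000 VGA window on
 * x86) and reports the size of the x86 ROM image it found:
 *
 *	size_t romsize;
 *	void __pci_rom_iomem *rom;
 *
 *	rom = pci_map_rom(pdev, &romsize);
 *	if (rom != NULL) {
 *		...
 *		pci_unmap_rom(pdev, rom);
 *	}
 */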

static inline void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

static inline int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

static inline void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

static inline bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

static inline bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

static inline bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

static inline int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device.  */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);

	return pdev->pd_resources[i].kva;
}

static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}
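
/*
 * Usage sketch (illustrative): map BAR 0 for its full reported length and
 * unmap it again via the kernel virtual address pci_iomap returned:
 *
 *	void __pci_iomem *regs;
 *
 *	regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
 *	if (regs == NULL)
 *		...fail...
 *	...
 *	pci_iounmap(pdev, regs);
 */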

static inline void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

static inline void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}
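
/*
 * Note: pci_save_state allocates the buffer that pci_restore_state
 * frees, so the two are expected to be called in matched pairs, e.g.
 * pci_save_state in the suspend path and pci_restore_state in the
 * resume path.
 */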

static inline bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

static inline bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

static inline bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out. */
	return false;
}

#endif  /* _LINUX_PCI_H_ */