/*	$NetBSD: pci.c,v 1.86 2004/07/29 16:51:01 drochner Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *     Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.86 2004/07/29 16:51:01 drochner Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int pcimatch __P((struct device *, struct cfdata *, void *));
void pciattach __P((struct device *, struct device *, void *));

CFATTACH_DECL(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL);

int	pciprint __P((void *, const char *));
int	pcisubmatch __P((struct device *, struct cfdata *, void *));

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

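/*
 * Autoconfiguration match routine for the "pci" bus attachment: check the
 * attachment name, then the bus-number locator, then sanity-check the bus
 * number itself.
 */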
int
pcimatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;

	if (strcmp(pba->pba_busname, cf->cf_name))
		return (0);

	/* Check the locators */
	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
	    cf->pcibuscf_bus != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

void
pciattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str)							\
do {									\
	aprint_normal("%s%s", sep, str);				\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

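	/*
	 * Record the bus tags, chipset handle and topology information in
	 * the softc; pci_probe_device() hands these on to the devices it
	 * attaches.
	 */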
	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;
	pci_enumerate_bus(sc, NULL, NULL);
}

int
pciprint(aux, pnp)
	void *aux;
	const char *pnp;
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\002\001multifn\002singlefn\003skipfunc0"
			    "\004skipfunc1\005skipfunc2\006skipfunc3"
			    "\007skipfunc4\010skipfunc5\011skipfunc6"
			    "\012skipfunc7",
			    devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

int
pcisubmatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (cf->pcicf_dev != PCI_UNK_DEV &&
	    cf->pcicf_dev != pa->pa_device)
		return (0);
	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
	    cf->pcicf_function != pa->pa_function)
		return (0);
	return (config_match(parent, cf, aux));
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

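	/*
	 * Only configuration header types 0 (device), 1 (PCI-PCI bridge)
	 * and 2 (CardBus bridge) are defined; skip anything else.
	 */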
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

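	/*
	 * Devices directly on the host bus use their own tag for interrupt
	 * mapping; devices behind a PCI-PCI bridge inherit the bridge's
	 * interrupt tag and add their device number to the swizzle applied
	 * to the interrupt pin below.
	 */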
	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = 	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		ret = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch) != NULL;
	}

	return (ret);
}

int
pci_get_capability(pc, tag, capid, offset, value)
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int capid;
	int *offset;
	pcireg_t *value;
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

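	/*
	 * Walk the capability list; each entry contains its capability ID
	 * and the offset of the next entry.
	 */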
	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

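/*
 * Search every attached PCI bus for a device accepted by the caller's
 * match function; the attachment arguments of the first match are copied
 * into *pa.
 */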
int
pci_find_device(struct pci_attach_args *pa,
		int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *) pcidev,
		    		      match, pa) != 0)
			return (1);
	}
	return (0);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

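		/*
		 * Decide how many functions to probe on this device; a
		 * quirk-table entry overrides the multifunction bit in the
		 * configuration header.
		 */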
		if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */

/*
 * Power Management Capability (Rev 2.2)
 */

int
pci_powerstate(pci_chipset_tag_t pc, pcitag_t tag, const int *newstate,
    int *oldstate)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

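	/*
	 * The capability word read above carries the PMC register in its
	 * upper 16 bits; the current and requested power states live in
	 * the PMCSR register that follows it.
	 */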
	cap = value >> 16;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;
	if (oldstate) {
		switch (now) {
		case PCI_PMCSR_STATE_D0:
			*oldstate = PCI_PWR_D0;
			break;
		case PCI_PMCSR_STATE_D1:
			*oldstate = PCI_PWR_D1;
			break;
		case PCI_PMCSR_STATE_D2:
			*oldstate = PCI_PWR_D2;
			break;
		case PCI_PMCSR_STATE_D3:
			*oldstate = PCI_PWR_D3;
			break;
		default:
			return EINVAL;
		}
	}
	if (newstate == NULL)
		return 0;
	switch (*newstate) {
	case PCI_PWR_D0:
		if (now == PCI_PMCSR_STATE_D0)
			return 0;
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PWR_D1:
		if (now == PCI_PMCSR_STATE_D1)
			return 0;
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D1SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PWR_D2:
		if (now == PCI_PMCSR_STATE_D2)
			return 0;
		if (now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D2SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PWR_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return 0;
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return EINVAL;
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);

	return 0;
}

/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

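	/*
	 * Read one 32-bit word at a time: post the address with the flag
	 * bit clear, poll until the hardware sets the flag, then fetch the
	 * word from the data register.
	 */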
	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

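	/*
	 * Write one 32-bit word at a time: load the data register, post the
	 * address with the flag bit set, then poll until the hardware
	 * clears the flag.
	 */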
	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

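/*
 * 64-bit DMA is only worth advertising when the platform supplied a valid
 * 64-bit DMA tag and there is physical memory above the 4GB boundary.
 */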
int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
		((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
			return 1;
#endif
	return 0;
}