/*	$NetBSD: pci.c,v 1.81 2003/08/15 07:17:21 itojun Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *     Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.81 2003/08/15 07:17:21 itojun Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int pcimatch __P((struct device *, struct cfdata *, void *));
void pciattach __P((struct device *, struct device *, void *));

CFATTACH_DECL(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL);

int	pciprint __P((void *, const char *));
int	pcisubmatch __P((struct device *, struct cfdata *, void *));

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

int
pcimatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;

	if (strcmp(pba->pba_busname, cf->cf_name))
		return (0);

	/* Check the locators */
	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
	    cf->pcibuscf_bus != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

void
pciattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str)							\
do {									\
	aprint_normal("%s%s", sep, str);				\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;
	pci_enumerate_bus(sc, NULL, NULL);
}

int
pciprint(aux, pnp)
	void *aux;
	const char *pnp;
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo);
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo);
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\20\1multifn", devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

int
pcisubmatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (cf->pcicf_dev != PCI_UNK_DEV &&
	    cf->pcicf_dev != pa->pa_device)
		return (0);
	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
	    cf->pcicf_function != pa->pa_function)
		return (0);
	return (config_match(parent, cf, aux));
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

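	/*
	 * Only header types 0 (device), 1 (PCI-PCI bridge) and
	 * 2 (CardBus bridge) have a layout this code knows about;
	 * skip anything else.
	 */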
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
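		/*
		 * E.g. INTA# (pin 1) on device 2 behind one bridge
		 * yields ((1 + 2 - 1) % 4) + 1 = 3, i.e. INTC#.
		 */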
		pa.pa_intrpin =	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		ret = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch) != NULL;
	}

	return (ret);
}

int
pci_get_capability(pc, tag, capid, offset, value)
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int capid;
	int *offset;
	pcireg_t *value;
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

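	/*
	 * Walk the capability list: each entry carries its capability
	 * ID in the low byte and the offset of the next entry in the
	 * following byte, with 0 terminating the list.
	 */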
	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

int
pci_find_device(struct pci_attach_args *pa,
		int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;

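	/*
	 * Scan every attached pci instance and re-enumerate its bus,
	 * stopping at the first device accepted by the caller's match
	 * function.
	 */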
	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *) pcidev,
				      match, pa) != 0)
			return (1);
	}
	return (0);
}

/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus_generic(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

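		/*
		 * A quirk entry, if present, overrides the multi-function
		 * bit in the header type register when deciding how many
		 * functions to probe.
		 */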
		if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}

/*
 * Power Management Capability (Rev 2.2)
 */

int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int newstate)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return (EOPNOTSUPP);

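	/*
	 * The upper 16 bits of the capability dword hold the PMC
	 * register, which advertises which low-power states the
	 * device supports.
	 */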
	cap = value >> 16;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;
	switch (newstate) {
	case PCI_PWR_D0:
		if (now == PCI_PMCSR_STATE_D0)
			return (0);
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PWR_D1:
		if (now == PCI_PMCSR_STATE_D1)
			return (0);
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return (EINVAL);
		if (!(cap & PCI_PMCR_D1SUPP))
			return (EOPNOTSUPP);
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PWR_D2:
		if (now == PCI_PMCSR_STATE_D2)
			return (0);
		if (now == PCI_PMCSR_STATE_D3)
			return (EINVAL);
		if (!(cap & PCI_PMCR_D2SUPP))
			return (EOPNOTSUPP);
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PWR_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return (0);
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return (EINVAL);
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);

	return (0);
}

int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
{
	int offset;
	pcireg_t value;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return (PCI_PWR_D0);
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	value &= PCI_PMCSR_STATE_MASK;
	switch (value) {
	case PCI_PMCSR_STATE_D0:
		return (PCI_PWR_D0);
	case PCI_PMCSR_STATE_D1:
		return (PCI_PWR_D1);
	case PCI_PMCSR_STATE_D2:
		return (PCI_PWR_D2);
	case PCI_PMCSR_STATE_D3:
		return (PCI_PWR_D3);
	}

	return (PCI_PWR_D0);
}

/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

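	/*
	 * For each word: write the VPD address with the flag bit
	 * clear, then poll until the device sets the flag to signal
	 * that the data register holds the requested word.
	 */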
	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

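	/*
	 * For each word: load the data register, then write the VPD
	 * address with the flag bit set and poll until the device
	 * clears the flag to signal completion.
	 */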
	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
		return 1;
#endif
	return 0;
}