pci.c revision 1.83
      1 /*	$NetBSD: pci.c,v 1.83 2004/04/23 21:13:07 itojun Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1995, 1996, 1997, 1998
      5  *     Christopher G. Demetriou.  All rights reserved.
      6  * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by Charles M. Hannum.
     19  * 4. The name of the author may not be used to endorse or promote products
     20  *    derived from this software without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 /*
     35  * PCI bus autoconfiguration.
     36  */
     37 
     38 #include <sys/cdefs.h>
     39 __KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.83 2004/04/23 21:13:07 itojun Exp $");
     40 
     41 #include "opt_pci.h"
     42 
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/device.h>
     46 
     47 #include <dev/pci/pcireg.h>
     48 #include <dev/pci/pcivar.h>
     49 #include <dev/pci/pcidevs.h>
     50 
     51 #include <uvm/uvm_extern.h>
     52 
     53 #include "locators.h"
     54 
     55 #ifdef PCI_CONFIG_DUMP
     56 int pci_config_dump = 1;
     57 #else
     58 int pci_config_dump = 0;
     59 #endif
     60 
     61 int pcimatch __P((struct device *, struct cfdata *, void *));
     62 void pciattach __P((struct device *, struct device *, void *));
     63 
     64 CFATTACH_DECL(pci, sizeof(struct pci_softc),
     65     pcimatch, pciattach, NULL, NULL);
     66 
     67 int	pciprint __P((void *, const char *));
     68 int	pcisubmatch __P((struct device *, struct cfdata *, void *));
     69 
     70 /*
     71  * Important note about PCI-ISA bridges:
     72  *
     73  * Callbacks are used to configure these devices so that ISA/EISA bridges
     74  * can attach their child busses after PCI configuration is done.
     75  *
     76  * This works because:
     77  *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
     78  *	(2) any ISA/EISA bridges must be attached to primary PCI
     79  *	    busses (i.e. bus zero).
     80  *
     81  * That boils down to: there can only be one of these outstanding
     82  * at a time; it is cleared when configuring PCI bus 0 before any
     83  * subdevices have been found, and it is run after all subdevices
     84  * of PCI bus 0 have been found.
     85  *
     86  * This is needed because there are some (legacy) PCI devices which
     87  * can show up as ISA/EISA devices as well (the prime examples of which
     88  * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
     89  * and the bridge is seen before the video board is, the board can show
     90  * up as an ISA device, and that can (bogusly) complicate the PCI device's
     91  * attach code, or make the PCI device not be properly attached at all.
     92  *
     93  * We use the generic config_defer() facility to achieve this.
     94  */
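
/*
 * Illustrative sketch (not part of this file): a hypothetical PCI-ISA
 * bridge driver (the "xbridge" names are invented for the example)
 * would defer attachment of its ISA bus from its attach routine
 * roughly as follows, so that the callback only runs once PCI bus 0
 * has been fully enumerated.
 */
#if 0
static void
xbridge_isa_callback(struct device *self)
{

	/*
	 * Called by config_defer() after all subdevices of PCI bus 0
	 * have been found; attach the ISA bus here, e.g. by filling
	 * in an isabus_attach_args and calling config_found().
	 */
}

static void
xbridge_attach(struct device *parent, struct device *self, void *aux)
{

	/* ... normal bridge setup ... */
	config_defer(self, xbridge_isa_callback);
}
#endif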
     95 
     96 int
     97 pcimatch(parent, cf, aux)
     98 	struct device *parent;
     99 	struct cfdata *cf;
    100 	void *aux;
    101 {
    102 	struct pcibus_attach_args *pba = aux;
    103 
    104 	if (strcmp(pba->pba_busname, cf->cf_name))
    105 		return (0);
    106 
    107 	/* Check the locators */
    108 	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
    109 	    cf->pcibuscf_bus != pba->pba_bus)
    110 		return (0);
    111 
    112 	/* sanity */
    113 	if (pba->pba_bus < 0 || pba->pba_bus > 255)
    114 		return (0);
    115 
    116 	/*
    117 	 * XXX check other (hardware?) indicators
    118 	 */
    119 
    120 	return (1);
    121 }
    122 
    123 void
    124 pciattach(parent, self, aux)
    125 	struct device *parent, *self;
    126 	void *aux;
    127 {
    128 	struct pcibus_attach_args *pba = aux;
    129 	struct pci_softc *sc = (struct pci_softc *)self;
    130 	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
    131 	const char *sep = "";
    132 
    133 	pci_attach_hook(parent, self, pba);
    134 
    135 	aprint_naive("\n");
    136 	aprint_normal("\n");
    137 
    138 	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
    139 	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
    140 	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
    141 	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
    142 	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);
    143 
    144 	if (io_enabled == 0 && mem_enabled == 0) {
    145 		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
    146 		return;
    147 	}
    148 
    149 #define	PRINT(str)							\
    150 do {									\
    151 	aprint_normal("%s%s", sep, str);				\
    152 	sep = ", ";							\
    153 } while (/*CONSTCOND*/0)
    154 
    155 	aprint_normal("%s: ", self->dv_xname);
    156 
    157 	if (io_enabled)
    158 		PRINT("i/o space");
    159 	if (mem_enabled)
    160 		PRINT("memory space");
    161 	aprint_normal(" enabled");
    162 
    163 	if (mrl_enabled || mrm_enabled || mwi_enabled) {
    164 		if (mrl_enabled)
    165 			PRINT("rd/line");
    166 		if (mrm_enabled)
    167 			PRINT("rd/mult");
    168 		if (mwi_enabled)
    169 			PRINT("wr/inv");
    170 		aprint_normal(" ok");
    171 	}
    172 
    173 	aprint_normal("\n");
    174 
    175 #undef PRINT
    176 
    177 	sc->sc_iot = pba->pba_iot;
    178 	sc->sc_memt = pba->pba_memt;
    179 	sc->sc_dmat = pba->pba_dmat;
    180 	sc->sc_dmat64 = pba->pba_dmat64;
    181 	sc->sc_pc = pba->pba_pc;
    182 	sc->sc_bus = pba->pba_bus;
    183 	sc->sc_bridgetag = pba->pba_bridgetag;
    184 	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
    185 	sc->sc_intrswiz = pba->pba_intrswiz;
    186 	sc->sc_intrtag = pba->pba_intrtag;
    187 	sc->sc_flags = pba->pba_flags;
    188 	pci_enumerate_bus(sc, NULL, NULL);
    189 }
    190 
    191 int
    192 pciprint(aux, pnp)
    193 	void *aux;
    194 	const char *pnp;
    195 {
    196 	struct pci_attach_args *pa = aux;
    197 	char devinfo[256];
    198 	const struct pci_quirkdata *qd;
    199 
    200 	if (pnp) {
    201 		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
    202 		aprint_normal("%s at %s", devinfo, pnp);
    203 	}
    204 	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
    205 	if (pci_config_dump) {
    206 		printf(": ");
    207 		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
    208 		if (!pnp)
    209 			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
    210 		printf("%s at %s", devinfo, pnp ? pnp : "?");
    211 		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
    212 #ifdef __i386__
    213 		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
    214 		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
    215 		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
    216 #else
    217 		printf("intrswiz %#lx, intrpin %#lx",
    218 		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
    219 #endif
    220 		printf(", i/o %s, mem %s,",
    221 		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
    222 		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
    223 		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
    224 		    PCI_PRODUCT(pa->pa_id));
    225 		if (qd == NULL) {
    226 			printf(" no quirks");
    227 		} else {
    228 			bitmask_snprintf(qd->quirks,
    229 			    "\002\001multifn\002singlefn\003skipfunc0"
    230 			    "\004skipfunc1\005skipfunc2\006skipfunc3"
    231 			    "\007skipfunc4\010skipfunc5\011skipfunc6"
    232 			    "\012skipfunc8",
    233 			    devinfo, sizeof (devinfo));
    234 			printf(" quirks %s", devinfo);
    235 		}
    236 		printf(")");
    237 	}
    238 	return (UNCONF);
    239 }
    240 
    241 int
    242 pcisubmatch(parent, cf, aux)
    243 	struct device *parent;
    244 	struct cfdata *cf;
    245 	void *aux;
    246 {
    247 	struct pci_attach_args *pa = aux;
    248 
    249 	if (cf->pcicf_dev != PCI_UNK_DEV &&
    250 	    cf->pcicf_dev != pa->pa_device)
    251 		return (0);
    252 	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
    253 	    cf->pcicf_function != pa->pa_function)
    254 		return (0);
    255 	return (config_match(parent, cf, aux));
    256 }
    257 
    258 int
    259 pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    260     int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
    261 {
    262 	pci_chipset_tag_t pc = sc->sc_pc;
    263 	struct pci_attach_args pa;
    264 	pcireg_t id, csr, class, intr, bhlcr;
    265 	int ret, pin, bus, device, function;
    266 
    267 	pci_decompose_tag(pc, tag, &bus, &device, &function);
    268 
    269 	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
    270 	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
    271 		return (0);
    272 
    273 	id = pci_conf_read(pc, tag, PCI_ID_REG);
    274 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
    275 	class = pci_conf_read(pc, tag, PCI_CLASS_REG);
    276 
    277 	/* Invalid vendor ID value? */
    278 	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
    279 		return (0);
    280 	/* XXX Not invalid, but we've done this ~forever. */
    281 	if (PCI_VENDOR(id) == 0)
    282 		return (0);
    283 
    284 	pa.pa_iot = sc->sc_iot;
    285 	pa.pa_memt = sc->sc_memt;
    286 	pa.pa_dmat = sc->sc_dmat;
    287 	pa.pa_dmat64 = sc->sc_dmat64;
    288 	pa.pa_pc = pc;
    289 	pa.pa_bus = bus;
    290 	pa.pa_device = device;
    291 	pa.pa_function = function;
    292 	pa.pa_tag = tag;
    293 	pa.pa_id = id;
    294 	pa.pa_class = class;
    295 
    296 	/*
    297 	 * Set up memory, I/O enable, and PCI command flags
    298 	 * as appropriate.
    299 	 */
    300 	pa.pa_flags = sc->sc_flags;
    301 	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
    302 		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
    303 	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
    304 		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;
    305 
    306 	/*
    307 	 * If the cache line size is not configured, then
    308 	 * clear the MRL/MRM/MWI command-ok flags.
    309 	 */
    310 	if (PCI_CACHELINE(bhlcr) == 0)
    311 		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
    312 		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);
    313 
    314 	if (sc->sc_bridgetag == NULL) {
    315 		pa.pa_intrswiz = 0;
    316 		pa.pa_intrtag = tag;
    317 	} else {
    318 		pa.pa_intrswiz = sc->sc_intrswiz + device;
    319 		pa.pa_intrtag = sc->sc_intrtag;
    320 	}
    321 
    322 	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);
    323 
    324 	pin = PCI_INTERRUPT_PIN(intr);
    325 	pa.pa_rawintrpin = pin;
    326 	if (pin == PCI_INTERRUPT_PIN_NONE) {
    327 		/* no interrupt */
    328 		pa.pa_intrpin = 0;
    329 	} else {
    330 		/*
    331 		 * swizzle it based on the number of busses we're
    332 		 * behind and our device number.
    333 		 */
    334 		pa.pa_intrpin = 	/* XXX */
    335 		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
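		/*
		 * Example: INTA# (pin == 1) from a device with
		 * pa_intrswiz == 3 maps to ((1 + 3 - 1) % 4) + 1 == 4,
		 * i.e. INTD# as seen at the interrupt parent.
		 */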
    336 	}
    337 	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);
    338 
    339 	if (match != NULL) {
    340 		ret = (*match)(&pa);
    341 		if (ret != 0 && pap != NULL)
    342 			*pap = pa;
    343 	} else {
    344 		ret = config_found_sm(&sc->sc_dev, &pa, pciprint,
    345 		    pcisubmatch) != NULL;
    346 	}
    347 
    348 	return (ret);
    349 }
    350 
    351 int
    352 pci_get_capability(pc, tag, capid, offset, value)
    353 	pci_chipset_tag_t pc;
    354 	pcitag_t tag;
    355 	int capid;
    356 	int *offset;
    357 	pcireg_t *value;
    358 {
    359 	pcireg_t reg;
    360 	unsigned int ofs;
    361 
    362 	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
    363 	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
    364 		return (0);
    365 
    366 	/* Determine the Capability List Pointer register to start with. */
    367 	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
    368 	switch (PCI_HDRTYPE_TYPE(reg)) {
    369 	case 0:	/* standard device header */
    370 		ofs = PCI_CAPLISTPTR_REG;
    371 		break;
    372 	case 2:	/* PCI-CardBus Bridge header */
    373 		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
    374 		break;
    375 	default:
    376 		return (0);
    377 	}
    378 
    379 	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
    380 	while (ofs != 0) {
    381 #ifdef DIAGNOSTIC
    382 		if ((ofs & 3) || (ofs < 0x40))
    383 			panic("pci_get_capability");
    384 #endif
    385 		reg = pci_conf_read(pc, tag, ofs);
    386 		if (PCI_CAPLIST_CAP(reg) == capid) {
    387 			if (offset)
    388 				*offset = ofs;
    389 			if (value)
    390 				*value = reg;
    391 			return (1);
    392 		}
    393 		ofs = PCI_CAPLIST_NEXT(reg);
    394 	}
    395 
    396 	return (0);
    397 }
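
/*
 * Illustrative sketch (not part of this file): a typical caller checks
 * for a capability and, if present, uses the returned offset to access
 * the capability's registers; pci_set_powerstate() below is an in-tree
 * example of exactly this pattern.
 */
#if 0
	int off;
	pcireg_t reg;

	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &off, &reg))
		printf("power management capability at 0x%02x\n", off);
#endif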
    398 
    399 int
    400 pci_find_device(struct pci_attach_args *pa,
    401 		int (*match)(struct pci_attach_args *))
    402 {
    403 	extern struct cfdriver pci_cd;
    404 	struct device *pcidev;
    405 	int i;
    406 
    407 	for (i = 0; i < pci_cd.cd_ndevs; i++) {
    408 		pcidev = pci_cd.cd_devs[i];
    409 		if (pcidev != NULL &&
    410 		    pci_enumerate_bus((struct pci_softc *) pcidev,
    411 		    		      match, pa) != 0)
    412 			return (1);
    413 	}
    414 	return (0);
    415 }
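
/*
 * Illustrative sketch (not part of this file): a caller locates a
 * device by supplying a match function; on success the pci_attach_args
 * describe the first matching device.  The vendor check is only an
 * example of what a match function might test.
 */
#if 0
static int
example_match(struct pci_attach_args *pa)
{

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL);
}

static int
example_find(struct pci_attach_args *pap)
{

	return (pci_find_device(pap, example_match));
}
#endif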
    416 
    417 /*
    418  * Generic PCI bus enumeration routine.  Used unless machine-dependent
    419  * code needs to provide something else.
    420  */
    421 int
    422 pci_enumerate_bus_generic(struct pci_softc *sc,
    423     int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
    424 {
    425 	pci_chipset_tag_t pc = sc->sc_pc;
    426 	int device, function, nfunctions, ret;
    427 	const struct pci_quirkdata *qd;
    428 	pcireg_t id, bhlcr;
    429 	pcitag_t tag;
    430 #ifdef __PCI_BUS_DEVORDER
    431 	char devs[32];
    432 	int i;
    433 #endif
    434 
    435 #ifdef __PCI_BUS_DEVORDER
    436 	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
    437 	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
    438 #else
    439 	for (device = 0; device < sc->sc_maxndevs; device++)
    440 #endif
    441 	{
    442 		tag = pci_make_tag(pc, sc->sc_bus, device, 0);
    443 
    444 		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
    445 		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
    446 			continue;
    447 
    448 		id = pci_conf_read(pc, tag, PCI_ID_REG);
    449 
    450 		/* Invalid vendor ID value? */
    451 		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
    452 			continue;
    453 		/* XXX Not invalid, but we've done this ~forever. */
    454 		if (PCI_VENDOR(id) == 0)
    455 			continue;
    456 
    457 		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));
    458 
    459 		if (qd != NULL &&
    460 		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
    461 			nfunctions = 8;
    462 		else if (qd != NULL &&
    463 		      (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
    464 			nfunctions = 1;
    465 		else
    466 			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
    467 
    468 		for (function = 0; function < nfunctions; function++) {
    469 			if (qd != NULL &&
    470 			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
    471 				continue;
    472 			tag = pci_make_tag(pc, sc->sc_bus, device, function);
    473 			ret = pci_probe_device(sc, tag, match, pap);
    474 			if (match != NULL && ret != 0)
    475 				return (ret);
    476 		}
    477 	}
    478 	return (0);
    479 }
    480 
    481 /*
    482  * Power Management Capability (Rev 2.2)
    483  */
    484 
    485 int
    486 pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int newstate)
    487 {
    488 	int offset;
    489 	pcireg_t value, cap, now;
    490 
    491 	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
    492 		return (EOPNOTSUPP);
    493 
    494 	cap = value >> 16;
    495 	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
    496 	now    = value & PCI_PMCSR_STATE_MASK;
    497 	value &= ~PCI_PMCSR_STATE_MASK;
    498 	switch (newstate) {
    499 	case PCI_PWR_D0:
    500 		if (now == PCI_PMCSR_STATE_D0)
    501 			return (0);
    502 		value |= PCI_PMCSR_STATE_D0;
    503 		break;
    504 	case PCI_PWR_D1:
    505 		if (now == PCI_PMCSR_STATE_D1)
    506 			return (0);
    507 		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
    508 			return (EINVAL);
    509 		if (!(cap & PCI_PMCR_D1SUPP))
    510 			return (EOPNOTSUPP);
    511 		value |= PCI_PMCSR_STATE_D1;
    512 		break;
    513 	case PCI_PWR_D2:
    514 		if (now == PCI_PMCSR_STATE_D2)
    515 			return (0);
    516 		if (now == PCI_PMCSR_STATE_D3)
    517 			return (EINVAL);
    518 		if (!(cap & PCI_PMCR_D2SUPP))
    519 			return (EOPNOTSUPP);
    520 		value |= PCI_PMCSR_STATE_D2;
    521 		break;
    522 	case PCI_PWR_D3:
    523 		if (now == PCI_PMCSR_STATE_D3)
    524 			return (0);
    525 		value |= PCI_PMCSR_STATE_D3;
    526 		break;
    527 	default:
    528 		return (EINVAL);
    529 	}
    530 	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
    531 	DELAY(1000);
    532 
    533 	return (0);
    534 }
    535 
    536 int
    537 pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
    538 {
    539 	int offset;
    540 	pcireg_t value;
    541 
    542 	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
    543 		return (PCI_PWR_D0);
    544 	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
    545 	value &= PCI_PMCSR_STATE_MASK;
    546 	switch (value) {
    547 	case PCI_PMCSR_STATE_D0:
    548 		return (PCI_PWR_D0);
    549 	case PCI_PMCSR_STATE_D1:
    550 		return (PCI_PWR_D1);
    551 	case PCI_PMCSR_STATE_D2:
    552 		return (PCI_PWR_D2);
    553 	case PCI_PMCSR_STATE_D3:
    554 		return (PCI_PWR_D3);
    555 	}
    556 
    557 	return (PCI_PWR_D0);
    558 }
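
/*
 * Illustrative sketch (not part of this file): a driver powering its
 * device down across a suspend and back up on resume; sc_pwrstate is
 * an invented softc member and error handling is elided.
 */
#if 0
	/* suspend: remember the current state, then drop to D3. */
	sc->sc_pwrstate = pci_get_powerstate(pc, tag);
	(void)pci_set_powerstate(pc, tag, PCI_PWR_D3);

	/* resume: return to D0, then reprogram the device. */
	(void)pci_set_powerstate(pc, tag, PCI_PWR_D0);
#endif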
    559 
    560 /*
    561  * Vital Product Data (PCI 2.2)
    562  */
    563 
    564 int
    565 pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    566     pcireg_t *data)
    567 {
    568 	uint32_t reg;
    569 	int ofs, i, j;
    570 
    571 	KASSERT(data != NULL);
    572 	KASSERT((offset + count) < 0x7fff);
    573 
    574 	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
    575 		return (1);
    576 
    577 	for (i = 0; i < count; offset += sizeof(*data), i++) {
    578 		reg &= 0x0000ffff;
    579 		reg &= ~PCI_VPD_OPFLAG;
    580 		reg |= PCI_VPD_ADDRESS(offset);
    581 		pci_conf_write(pc, tag, ofs, reg);
    582 
    583 		/*
    584 		 * PCI 2.2 does not specify how long we should poll
    585 		 * for completion nor whether the operation can fail.
    586 		 */
    587 		j = 0;
    588 		do {
    589 			if (j++ == 20)
    590 				return (1);
    591 			delay(4);
    592 			reg = pci_conf_read(pc, tag, ofs);
    593 		} while ((reg & PCI_VPD_OPFLAG) == 0);
    594 		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
    595 	}
    596 
    597 	return (0);
    598 }
    599 
    600 int
    601 pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    602     pcireg_t *data)
    603 {
    604 	pcireg_t reg;
    605 	int ofs, i, j;
    606 
    607 	KASSERT(data != NULL);
    608 	KASSERT((offset + count) < 0x7fff);
    609 
    610 	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
    611 		return (1);
    612 
    613 	for (i = 0; i < count; offset += sizeof(*data), i++) {
    614 		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);
    615 
    616 		reg &= 0x0000ffff;
    617 		reg |= PCI_VPD_OPFLAG;
    618 		reg |= PCI_VPD_ADDRESS(offset);
    619 		pci_conf_write(pc, tag, ofs, reg);
    620 
    621 		/*
    622 		 * PCI 2.2 does not specify how long we should poll
    623 		 * for completion nor whether the operation can fail.
    624 		 */
    625 		j = 0;
    626 		do {
    627 			if (j++ == 20)
    628 				return (1);
    629 			delay(1);
    630 			reg = pci_conf_read(pc, tag, ofs);
    631 		} while (reg & PCI_VPD_OPFLAG);
    632 	}
    633 
    634 	return (0);
    635 }
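
/*
 * Illustrative sketch (not part of this file): reading the first four
 * 32-bit words of a device's VPD area.  As the loops above show, the
 * offset is a byte offset into the VPD area and the count is a number
 * of 32-bit words.
 */
#if 0
	pcireg_t vpd[4];

	if (pci_vpd_read(pc, tag, 0, 4, vpd) != 0)
		printf("VPD not readable\n");
#endif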
    636 
    637 int
    638 pci_dma64_available(struct pci_attach_args *pa)
    639 {
    640 #ifdef _PCI_HAVE_DMA64
    641 	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
    642 	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
    643 		return (1);
    644 #endif
    645 	return (0);
    646 }
    647