/*	$NetBSD: pci.c,v 1.91 2005/02/04 02:10:45 perry Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *	Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.91 2005/02/04 02:10:45 perry Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int	pcimatch(struct device *, struct cfdata *, void *);
void	pciattach(struct device *, struct device *, void *);
int	pcirescan(struct device *, const char *, const int *);
void	pcidevdetached(struct device *, struct device *);

CFATTACH_DECL2(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL, pcirescan, pcidevdetached);

int	pciprint(void *, const char *);
int	pcisubmatch(struct device *, struct cfdata *,
	    const locdesc_t *, void *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *, const int *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

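/*
 * Autoconfiguration match routine for the PCI bus itself: check the
 * "bus" locator against the attach arguments and reject out-of-range
 * bus numbers.
 */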
int
pcimatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pcibus_attach_args *pba = aux;

	/* Check the locators */
	if (cf->cf_loc[PCIBUSCF_BUS] != PCIBUSCF_BUS_DEFAULT &&
	    cf->cf_loc[PCIBUSCF_BUS] != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

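/*
 * Attach a PCI bus: report the enabled spaces and command capabilities,
 * copy the attach arguments into the softc, and scan the bus for
 * devices.
 */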
void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";
	static const int wildcard[2] = {
		PCICF_DEV_DEFAULT, PCICF_FUNCTION_DEFAULT
	};

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str)						\
do {								\
	aprint_normal("%s%s", sep, str);			\
	sep = ", ";						\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;
	pcirescan(&sc->sc_dev, "pci", wildcard);
}

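/*
 * Rescan the bus, attaching any devices that match the supplied
 * locators.
 */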
int
pcirescan(struct device *sc, const char *ifattr, const int *locators)
{

	KASSERT(ifattr && !strcmp(ifattr, "pci"));
	KASSERT(locators);

	pci_enumerate_bus((struct pci_softc *)sc, locators, NULL, NULL);
	return (0);
}

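/*
 * Autoconfiguration print routine: describe a found device and, if
 * pci_config_dump is set, dump its configuration space and quirks.
 */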
int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
		    sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
			    sizeof(devinfo));
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device,
		    pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\002\001multifn\002singlefn\003skipfunc0"
			    "\004skipfunc1\005skipfunc2\006skipfunc3"
			    "\007skipfunc4\010skipfunc5\011skipfunc6"
			    "\012skipfunc7",
			    devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

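/*
 * Submatch routine: filter candidate drivers by the "dev" and
 * "function" locators before running the driver's own match function.
 */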
int
pcisubmatch(struct device *parent, struct cfdata *cf,
    const locdesc_t *ldesc, void *aux)
{

	if (cf->cf_loc[PCICF_DEV] != PCICF_DEV_DEFAULT &&
	    cf->cf_loc[PCICF_DEV] != ldesc->locs[PCICF_DEV])
		return (0);
	if (cf->cf_loc[PCICF_FUNCTION] != PCICF_FUNCTION_DEFAULT &&
	    cf->cf_loc[PCICF_FUNCTION] != ldesc->locs[PCICF_FUNCTION])
		return (0);
	return (config_match(parent, cf, aux));
}

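/*
 * Probe a single device/function.  If `match' is non-NULL it is called
 * instead of attaching a driver, and the attach arguments are copied to
 * `pap' on success; otherwise a driver is attached via
 * config_found_sm_loc().
 */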
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;
	int help[3];
	locdesc_t *ldp = (void *)&help; /* XXX XXX */
	struct device *subdev;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function) && !match)
		return (0);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin =	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		ldp->len = 2;
		ldp->locs[PCICF_DEV] = device;
		ldp->locs[PCICF_FUNCTION] = function;

		subdev = config_found_sm_loc(&sc->sc_dev, "pci", ldp, &pa,
		    pciprint, pcisubmatch);
		sc->PCI_SC_DEVICESC(device, function) = subdev;
		ret = (subdev != NULL);
	}

	return (ret);
}

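/*
 * Child-detached hook: clear the cached device pointer for the
 * detached device/function so that the slot can be probed again.
 */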
void
pcidevdetached(struct device *sc, struct device *dev)
{
	struct pci_softc *psc = (struct pci_softc *)sc;
	int d, f;

	KASSERT(dev->dv_locators);
	d = dev->dv_locators[PCICF_DEV];
	f = dev->dv_locators[PCICF_FUNCTION];

	KASSERT(psc->PCI_SC_DEVICESC(d, f) == dev);

	psc->PCI_SC_DEVICESC(d, f) = 0;
}

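/*
 * Walk the device's capability list looking for `capid'.  On success,
 * optionally return the capability's offset and its first register.
 */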
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

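/*
 * Search all attached PCI busses for a device accepted by `match';
 * the attach arguments of the first hit are returned in `pa'.
 */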
int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;
	static const int wildcard[2] = {
		PCICF_DEV_DEFAULT,
		PCICF_FUNCTION_DEFAULT
	};

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev, wildcard,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			if ((locators[PCICF_FUNCTION] !=
			     PCICF_FUNCTION_DEFAULT) &&
			    (locators[PCICF_FUNCTION] != function))
				continue;

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */

/*
 * Power Management Capability (Rev 2.2)
 */

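/*
 * Query and/or set a device's power state through its power management
 * capability; returns EOPNOTSUPP if the device lacks the capability or
 * the requested state, and EINVAL for illegal transitions.
 */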
int
pci_powerstate(pci_chipset_tag_t pc, pcitag_t tag, const int *newstate,
    int *oldstate)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	cap = value >> 16;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;
	if (oldstate) {
		switch (now) {
		case PCI_PMCSR_STATE_D0:
			*oldstate = PCI_PWR_D0;
			break;
		case PCI_PMCSR_STATE_D1:
			*oldstate = PCI_PWR_D1;
			break;
		case PCI_PMCSR_STATE_D2:
			*oldstate = PCI_PWR_D2;
			break;
		case PCI_PMCSR_STATE_D3:
			*oldstate = PCI_PWR_D3;
			break;
		default:
			return EINVAL;
		}
	}
	if (newstate == NULL)
		return 0;
	switch (*newstate) {
	case PCI_PWR_D0:
		if (now == PCI_PMCSR_STATE_D0)
			return 0;
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PWR_D1:
		if (now == PCI_PMCSR_STATE_D1)
			return 0;
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D1SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PWR_D2:
		if (now == PCI_PMCSR_STATE_D2)
			return 0;
		if (now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D2SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PWR_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return 0;
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return EINVAL;
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);

	return 0;
}

/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

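/*
 * Return non-zero if the bus supplies a usable 64-bit DMA tag and the
 * machine has physical memory above the 4GB boundary.
 */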
int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
		return 1;
#endif
	return 0;
}

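/*
 * Save and restore the first 64 bytes (16 registers) of a device's
 * configuration space, e.g. across a suspend/resume cycle.
 */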
void
pci_conf_capture(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pcs->reg[off] = pci_conf_read(pc, tag, (off * 4));

	return;
}

void
pci_conf_restore(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pci_conf_write(pc, tag, (off * 4), pcs->reg[off]);

	return;
}