/*	$NetBSD: pci.c,v 1.95 2005/08/25 22:33:19 drochner Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *     Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.95 2005/08/25 22:33:19 drochner Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int	pciprint(void *, const char *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *, const int *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

static int
pcirescan(struct device *sc, const char *ifattr, const int *locators)
{

	KASSERT(ifattr && !strcmp(ifattr, "pci"));
	KASSERT(locators);

	pci_enumerate_bus((struct pci_softc *)sc, locators, NULL, NULL);
	return (0);
}

static int
pcimatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pcibus_attach_args *pba = aux;

	/* Check the locators */
	if (cf->cf_loc[PCIBUSCF_BUS] != PCIBUSCF_BUS_DEFAULT &&
	    cf->cf_loc[PCIBUSCF_BUS] != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

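/*
 * Attach a PCI bus instance: report which spaces and command features the
 * host bridge enabled, copy the bus resources handed to us in the
 * pcibus_attach_args into our softc, and then scan the bus for devices
 * via pcirescan().
 */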
static void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";
	static const int wildcard[2] = { PCICF_DEV_DEFAULT,
	    PCICF_FUNCTION_DEFAULT };

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str) \
do { \
	aprint_normal("%s%s", sep, str); \
	sep = ", "; \
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;
	pcirescan(&sc->sc_dev, "pci", wildcard);
}

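/*
 * Autoconfiguration print routine for children of a PCI bus.  Normally
 * this just prints the device/function locators; with pci_config_dump
 * set it also dumps the whole configuration space and any quirk
 * information for the device.
 */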
int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
		    sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
			    sizeof(devinfo));
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device,
		    pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\002\001multifn\002singlefn\003skipfunc0"
			    "\004skipfunc1\005skipfunc2\006skipfunc3"
			    "\007skipfunc4\010skipfunc5\011skipfunc6"
			    "\012skipfunc7",
			    devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

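/*
 * Probe a single (bus, device, function) identified by "tag".  If the
 * configuration header looks valid, fill in a pci_attach_args and either
 * hand it to the caller's match callback (copying it to *pap on a hit)
 * or attach a child driver through config_found_sm_loc().
 */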
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;
	int locs[PCICF_NLOCS];
	struct device *subdev;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function) && !match)
		return (0);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin =	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		locs[PCICF_DEV] = device;
		locs[PCICF_FUNCTION] = function;

		subdev = config_found_sm_loc(&sc->sc_dev, "pci", locs, &pa,
		    pciprint, config_stdsubmatch);
		sc->PCI_SC_DEVICESC(device, function) = subdev;
		ret = (subdev != NULL);
	}

	return (ret);
}

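/*
 * Child detachment hook: forget the cached device pointer for this
 * device/function so a later rescan can probe the slot again.
 */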
static void
pcidevdetached(struct device *sc, struct device *dev)
{
	struct pci_softc *psc = (struct pci_softc *)sc;
	int d, f;

	KASSERT(dev->dv_locators);
	d = dev->dv_locators[PCICF_DEV];
	f = dev->dv_locators[PCICF_FUNCTION];

	KASSERT(psc->PCI_SC_DEVICESC(d, f) == dev);

	psc->PCI_SC_DEVICESC(d, f) = 0;
}

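/*
 * Walk the device's capability list looking for the capability "capid".
 * On success, optionally return the configuration-space offset of the
 * capability and the first dword of its register.
 *
 * Example use from a driver attach routine (illustrative sketch only,
 * not taken from any particular driver):
 *
 *	int off;
 *	pcireg_t reg;
 *
 *	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PWRMGMT,
 *	    &off, &reg))
 *		aprint_normal("power management capability at 0x%x\n", off);
 */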
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

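/*
 * Look for a device matching the caller-supplied predicate on every
 * attached PCI bus.  Returns 1 and fills in *pa for the first match,
 * 0 if nothing matched.
 *
 * Example (illustrative sketch only; "mydev_check" is hypothetical):
 *
 *	static int
 *	mydev_check(struct pci_attach_args *pa)
 *	{
 *		return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL);
 *	}
 *
 *	struct pci_attach_args pa;
 *
 *	if (pci_find_device(&pa, mydev_check))
 *		...pa now describes the matching device...
 */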
int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;
	static const int wildcard[2] = {
		PCICF_DEV_DEFAULT,
		PCICF_FUNCTION_DEFAULT
	};

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev, wildcard,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			if ((locators[PCICF_FUNCTION] != PCICF_FUNCTION_DEFAULT)
			    && (locators[PCICF_FUNCTION] != function))
				continue;

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */

/*
 * Power Management Capability (Rev 2.2)
 */

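/*
 * Query and/or set a device's power state (D0-D3) through its power
 * management capability.  If "oldstate" is non-NULL it receives the
 * current state; if "newstate" is non-NULL it names the state to enter.
 * Returns EOPNOTSUPP when the capability (or the requested state) is
 * not supported and EINVAL for transitions this code does not allow.
 *
 * Example: putting a device into D3 before suspend (illustrative
 * sketch only):
 *
 *	const int state = PCI_PWR_D3;
 *
 *	(void)pci_powerstate(pa->pa_pc, pa->pa_tag, &state, NULL);
 */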
int
pci_powerstate(pci_chipset_tag_t pc, pcitag_t tag, const int *newstate,
    int *oldstate)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	cap = value >> 16;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;
	if (oldstate) {
		switch (now) {
		case PCI_PMCSR_STATE_D0:
			*oldstate = PCI_PWR_D0;
			break;
		case PCI_PMCSR_STATE_D1:
			*oldstate = PCI_PWR_D1;
			break;
		case PCI_PMCSR_STATE_D2:
			*oldstate = PCI_PWR_D2;
			break;
		case PCI_PMCSR_STATE_D3:
			*oldstate = PCI_PWR_D3;
			break;
		default:
			return EINVAL;
		}
	}
	if (newstate == NULL)
		return 0;
	switch (*newstate) {
	case PCI_PWR_D0:
		if (now == PCI_PMCSR_STATE_D0)
			return 0;
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PWR_D1:
		if (now == PCI_PMCSR_STATE_D1)
			return 0;
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D1SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PWR_D2:
		if (now == PCI_PMCSR_STATE_D2)
			return 0;
		if (now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D2SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PWR_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return 0;
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return EINVAL;
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);

	return 0;
}

/*
 * Vital Product Data (PCI 2.2)
 */

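/*
 * Read "count" 32-bit words of Vital Product Data starting at "offset".
 * For each word the VPD address is written with the operation flag
 * clear, then the flag is polled until the hardware sets it to indicate
 * that the data register is valid.  Returns 0 on success, 1 on failure
 * (no VPD capability or a poll timeout).
 */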
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

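/*
 * Write "count" 32-bit words of Vital Product Data starting at "offset".
 * The data register is written first, then the address with the
 * operation flag set; the flag is polled until the hardware clears it
 * to signal completion.  Returns 0 on success, 1 on failure.
 */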
int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

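/*
 * Return nonzero only if the bus supplied a usable 64-bit DMA tag and
 * the machine actually has physical memory above the 4GB boundary
 * (otherwise 32-bit DMA is sufficient).
 */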
int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
		return 1;
#endif
	return 0;
}

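/*
 * Save the first 64 bytes (16 dwords) of a device's configuration
 * space, e.g. before powering the device down.
 */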
void
pci_conf_capture(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pcs->reg[off] = pci_conf_read(pc, tag, (off * 4));

	return;
}

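/*
 * Restore a configuration-space snapshot previously taken with
 * pci_conf_capture().
 */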
void
pci_conf_restore(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pci_conf_write(pc, tag, (off * 4), pcs->reg[off]);

	return;
}

CFATTACH_DECL2(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL, pcirescan, pcidevdetached);