/*	$NetBSD: pci.c,v 1.103.22.2 2007/08/05 19:33:02 jmcneill Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *	Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.103.22.2 2007/08/05 19:33:02 jmcneill Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int pciprint(void *, const char *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *, const int *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */
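
/*
 * Illustrative sketch (hypothetical driver and function names): a PCI-ISA
 * bridge driver typically defers the attachment of its child ISA bus with
 * config_defer(), so the deferred callback only runs once PCI bus 0 has
 * been fully enumerated, roughly like this:
 *
 *	static void
 *	xbridge_callback(struct device *self)
 *	{
 *		struct isabus_attach_args iba;
 *
 *		memset(&iba, 0, sizeof(iba));
 *		(bridge-specific setup of iba goes here)
 *		config_found_ia(self, "isabus", &iba, isabusprint);
 *	}
 *
 *	static void
 *	xbridge_attach(struct device *parent, struct device *self, void *aux)
 *	{
 *		(bridge-specific attachment goes here)
 *		config_defer(self, xbridge_callback);
 *	}
 */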

static int
pcirescan(struct device *sc, const char *ifattr, const int *locators)
{

	KASSERT(ifattr && !strcmp(ifattr, "pci"));
	KASSERT(locators);

	pci_enumerate_bus((struct pci_softc *)sc, locators, NULL, NULL);
	return (0);
}

static int
pcimatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pcibus_attach_args *pba = aux;

	/* Check the locators */
	if (cf->cf_loc[PCIBUSCF_BUS] != PCIBUSCF_BUS_DEFAULT &&
	    cf->cf_loc[PCIBUSCF_BUS] != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

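/*
 * Set the requested power state on every function on this bus that has
 * no driver attached; functions with a driver attached are left alone,
 * since the driver is responsible for managing its own device.
 */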
static void
pci_power_devices(struct pci_softc *sc, pnp_state_t newstate)
{
	pci_chipset_tag_t pc;
	int device, function, nfunctions;
	pcitag_t tag;
	pcireg_t bhlcr, id;
	pcireg_t state;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

	pc = sc->sc_pc;
	switch (newstate) {
	case PNP_STATE_D1:
		state = PCI_PMCSR_STATE_D1;
		break;
	case PNP_STATE_D3:
		state = PCI_PMCSR_STATE_D3;
		break;
	case PNP_STATE_D0:
		state = PCI_PMCSR_STATE_D0;
		break;
	default:
		/* we should never be called here */
#ifdef DIAGNOSTIC
		panic("pci_power_devices called with invalid state %d",
		    newstate);
		/* NOTREACHED */
#endif
		return;
	}

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);
		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;
		id = pci_conf_read(pc, tag, PCI_ID_REG);
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID ||
		    PCI_VENDOR(id) == 0x0000)
			continue;
		nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			if (sc->PCI_SC_DEVICESC(device, function) != NULL)
				continue;
			(void)pci_set_powerstate(pc, tag, state);
		}
	}

	return;
}

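/*
 * Bus-level power handler, registered with pnp_register() in pciattach().
 * It advertises D0/D3 support for the bus and, on a set-state request,
 * propagates the new state to undriven functions via pci_power_devices().
 */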
static pnp_status_t
pci_power(device_t dv, pnp_request_t req, void *opaque)
{
	struct pci_softc *sc;
	pnp_capabilities_t *pcaps;
	pnp_state_t *pstate;

	sc = (struct pci_softc *)dv;

	switch (req) {
	case PNP_REQUEST_GET_CAPABILITIES:
		pcaps = opaque;
		pcaps->state |= PNP_STATE_D0 | PNP_STATE_D3;
		break;

	case PNP_REQUEST_GET_STATE:
		pstate = opaque;
		*pstate = PNP_STATE_D0;	/* XXX */
		break;

	case PNP_REQUEST_SET_STATE:
		pstate = opaque;

		if (*pstate == PNP_STATE_D2)
			return PNP_STATUS_UNSUPPORTED;

		pci_power_devices(sc, *pstate);
		break;

	case PNP_REQUEST_NOTIFY:
		/* XXX TODO */
		break;

	default:
		return PNP_STATUS_UNSUPPORTED;
	}

	return PNP_STATUS_SUCCESS;
}

static void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	pnp_status_t status;
	const char *sep = "";
	static const int wildcard[PCICF_NLOCS] = {
		PCICF_DEV_DEFAULT, PCICF_FUNCTION_DEFAULT
	};

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str)							\
do {									\
	aprint_normal("%s%s", sep, str);				\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;

	status = pnp_register(self, pci_power);
	if (status != PNP_STATUS_SUCCESS)
		aprint_error("%s: couldn't establish power handler\n",
		    device_xname(self));

	pcirescan(&sc->sc_dev, "pci", wildcard);
}

int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\002\001multifn\002singlefn\003skipfunc0"
			    "\004skipfunc1\005skipfunc2\006skipfunc3"
			    "\007skipfunc4\010skipfunc5\011skipfunc6"
			    "\012skipfunc7",
			    devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;
	int locs[PCICF_NLOCS];
	struct device *subdev;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function) && !match)
		return (0);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin =	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
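	/*
	 * Worked example of the swizzle above: INTA# (pin 1) on device 3
	 * of a bus whose sc_intrswiz is 0 gives pa_intrswiz 3, so the
	 * result is ((1 + 3 - 1) % 4) + 1 == 4, i.e. INTD# as seen by
	 * the parent bus.
	 */
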
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		locs[PCICF_DEV] = device;
		locs[PCICF_FUNCTION] = function;

		subdev = config_found_sm_loc(&sc->sc_dev, "pci", locs, &pa,
		    pciprint, config_stdsubmatch);
		sc->PCI_SC_DEVICESC(device, function) = subdev;
		ret = (subdev != NULL);
	}

	return (ret);
}

static void
pcidevdetached(struct device *sc, struct device *dev)
{
	struct pci_softc *psc = (struct pci_softc *)sc;
	int d, f;

	d = device_locator(dev, PCICF_DEV);
	f = device_locator(dev, PCICF_FUNCTION);

	KASSERT(psc->PCI_SC_DEVICESC(d, f) == dev);

	psc->PCI_SC_DEVICESC(d, f) = 0;
}

int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}
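
/*
 * Example use (a sketch mirroring pci_get_powerstate() below): locate the
 * power management capability and read its control/status register.
 *
 *	int off;
 *	pcireg_t reg;
 *
 *	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &off, &reg))
 *		reg = pci_conf_read(pc, tag, off + PCI_PMCSR);
 */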

int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;
	static const int wildcard[2] = {
		PCICF_DEV_DEFAULT,
		PCICF_FUNCTION_DEFAULT
	};

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev, wildcard,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			if ((locators[PCICF_FUNCTION] != PCICF_FUNCTION_DEFAULT)
			    && (locators[PCICF_FUNCTION] != function))
				continue;

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */


/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
		return 1;
#endif
	return 0;
}
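
/*
 * Typical driver usage (sketch; sc_dmat is a hypothetical softc member):
 * prefer the 64-bit DMA tag when it is usable, otherwise fall back to
 * the 32-bit tag.
 *
 *	sc->sc_dmat = pci_dma64_available(pa) ? pa->pa_dmat64 : pa->pa_dmat;
 */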

void
pci_conf_capture(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pcs->reg[off] = pci_conf_read(pc, tag, (off * 4));

	return;
}

void
pci_conf_restore(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;
	pcireg_t val;

	for (off = 15; off >= 0; off--) {
		val = pci_conf_read(pc, tag, (off * 4));
		if (val != pcs->reg[off])
			pci_conf_write(pc, tag, (off * 4), pcs->reg[off]);
	}

	return;
}
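
/*
 * pci_conf_capture() and pci_conf_restore() are intended to be used as a
 * pair around a suspend/resume or reset cycle, e.g. (sketch; sc_pciconf
 * is a hypothetical softc member):
 *
 *	pci_conf_capture(pc, tag, &sc->sc_pciconf);
 *	(device loses power or is reset here)
 *	pci_conf_restore(pc, tag, &sc->sc_pciconf);
 */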

/*
 * Power Management Capability (Rev 2.2)
 */
int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t *state)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	cap = value >> PCI_PMCR_SHIFT;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	switch (now) {
	case PCI_PMCSR_STATE_D0:
	case PCI_PMCSR_STATE_D1:
	case PCI_PMCSR_STATE_D2:
	case PCI_PMCSR_STATE_D3:
		*state = now;
		return 0;
	default:
		return EINVAL;
	}
}

int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t state)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	cap = value >> PCI_PMCR_SHIFT;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;

	if (now == state)
		return 0;
	switch (state) {
	case PCI_PMCSR_STATE_D0:
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PMCSR_STATE_D1:
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D1SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PMCSR_STATE_D2:
		if (now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D2SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PMCSR_STATE_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return 0;
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return EINVAL;
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);
	return 0;
}

pnp_state_t
pci_pnp_powerstate(pcireg_t reg)
{
	pnp_state_t state;

	switch (reg) {
	case PCI_PMCSR_STATE_D0:
		state = PNP_STATE_D0;
		break;
	case PCI_PMCSR_STATE_D1:
		state = PNP_STATE_D1;
		break;
	case PCI_PMCSR_STATE_D2:
		state = PNP_STATE_D2;
		break;
	case PCI_PMCSR_STATE_D3:
		state = PNP_STATE_D3;
		break;
	default:
		state = PNP_STATE_UNKNOWN;
		break;
	}

	return state;
}

pnp_state_t
pci_pnp_capabilities(pcireg_t reg)
{
	pnp_state_t state;
	pcireg_t cap;

	cap = reg >> PCI_PMCR_SHIFT;

	state = PNP_STATE_D0 | PNP_STATE_D3;
	if (cap & PCI_PMCR_D1SUPP)
		state |= PNP_STATE_D1;
	if (cap & PCI_PMCR_D2SUPP)
		state |= PNP_STATE_D2;

	return state;
}

int
pci_activate(pci_chipset_tag_t pc, pcitag_t tag, void *sc,
    int (*wakefun)(pci_chipset_tag_t, pcitag_t, void *, pcireg_t))
{
	struct device *dv = sc;
	pcireg_t pmode;
	int error;

	if ((error = pci_get_powerstate(pc, tag, &pmode)))
		return error;

	switch (pmode) {
	case PCI_PMCSR_STATE_D0:
		break;
	case PCI_PMCSR_STATE_D3:
		if (wakefun == NULL) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error(
			    "%s: unable to wake up from power state D3\n",
			    dv->dv_xname);
			return EOPNOTSUPP;
		}
		/*FALLTHROUGH*/
	default:
		if (wakefun) {
			error = (*wakefun)(pc, tag, sc, pmode);
			if (error)
				return error;
		}
		aprint_normal("%s: waking up from power state D%d\n",
		    dv->dv_xname, pmode);
		if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
			return error;
	}
	return 0;
}

int
pci_activate_null(pci_chipset_tag_t pc, pcitag_t tag,
    void *sc, pcireg_t state)
{
	return 0;
}

CFATTACH_DECL2(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL, pcirescan, pcidevdetached);