/*	$NetBSD: pci.c,v 1.103.22.4 2007/08/21 06:33:51 joerg Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *	Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.103.22.4 2007/08/21 06:33:51 joerg Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int pciprint(void *, const char *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *, const int *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */

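/*
 * Rescan the bus: enumerate it with the given locators and attach any
 * functions that do not yet have a driver.
 */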
static int
pcirescan(struct device *sc, const char *ifattr, const int *locators)
{

	KASSERT(ifattr && !strcmp(ifattr, "pci"));
	KASSERT(locators);

	pci_enumerate_bus((struct pci_softc *)sc, locators, NULL, NULL);
	return (0);
}

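/*
 * Autoconfiguration match routine: accept any PCI bus whose number
 * satisfies the config file locators and lies in the valid 0-255 range.
 */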
static int
pcimatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pcibus_attach_args *pba = aux;

	/* Check the locators */
	if (cf->cf_loc[PCIBUSCF_BUS] != PCIBUSCF_BUS_DEFAULT &&
	    cf->cf_loc[PCIBUSCF_BUS] != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

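/*
 * Walk every function on the bus and move the devices that have no
 * driver attached into the PCI power state corresponding to "newstate".
 */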
static void
pci_power_devices(struct pci_softc *sc, pnp_state_t newstate)
{
	pci_chipset_tag_t pc;
	int device, function, nfunctions;
	pcitag_t tag;
	pcireg_t bhlcr, id;
	pcireg_t state;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

	pc = sc->sc_pc;
	switch (newstate) {
	case PNP_STATE_D1:
		state = PCI_PMCSR_STATE_D1;
		break;
	case PNP_STATE_D3:
		state = PCI_PMCSR_STATE_D3;
		break;
	case PNP_STATE_D0:
		state = PCI_PMCSR_STATE_D0;
		break;
	default:
		/* we should never be called here */
#ifdef DIAGNOSTIC
		panic("pci_power_devices called with invalid reason %d\n",
		    newstate);
		/* NOTREACHED */
#endif
		return;
	}

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);
		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;
		id = pci_conf_read(pc, tag, PCI_ID_REG);
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID ||
		    PCI_VENDOR(id) == 0x0000)
			continue;
		nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			if (sc->PCI_SC_DEVICESC(device, function) != NULL)
				continue;
			(void)pci_set_powerstate(pc, tag, state);
		}
	}

	return;
}

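/*
 * Power management handler for the bus itself, registered with
 * pnp_register() at attach time.  It reports D0/D3 capability and
 * forwards state changes to the devices below via pci_power_devices().
 */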
static pnp_status_t
pci_power(device_t dv, pnp_request_t req, void *opaque)
{
	struct pci_softc *sc;
	pnp_capabilities_t *pcaps;
	pnp_state_t *pstate;

	sc = (struct pci_softc *)dv;

	switch (req) {
	case PNP_REQUEST_GET_CAPABILITIES:
		pcaps = opaque;
		pcaps->state |= PNP_STATE_D0 | PNP_STATE_D3;
		break;

	case PNP_REQUEST_GET_STATE:
		pstate = opaque;
		*pstate = PNP_STATE_D0;	/* XXX */
		break;

	case PNP_REQUEST_SET_STATE:
		pstate = opaque;

		if (*pstate == PNP_STATE_D2)
			return PNP_STATUS_UNSUPPORTED;

		pci_power_devices(sc, *pstate);
		break;

	case PNP_REQUEST_NOTIFY:
		/* XXX TODO */
		break;

	default:
		return PNP_STATUS_UNSUPPORTED;
	}

	return PNP_STATUS_SUCCESS;
}

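/*
 * Attach a PCI bus: report the enabled address spaces, record the bus
 * accessors from the pcibus_attach_args, register the power handler,
 * and scan the bus for devices.
 */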
static void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	pnp_status_t status;
	const char *sep = "";
	static const int wildcard[PCICF_NLOCS] = {
		PCICF_DEV_DEFAULT, PCICF_FUNCTION_DEFAULT
	};

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str)							\
do {									\
	aprint_normal("%s%s", sep, str);				\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;

	status = pnp_register(self, pci_power);
	if (status != PNP_STATUS_SUCCESS)
		aprint_error("%s: couldn't establish power handler\n",
		    device_xname(self));

	pcirescan(&sc->sc_dev, "pci", wildcard);
}

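/*
 * Autoconfiguration print routine for devices found on the bus; with
 * pci_config_dump set it also dumps configuration space and quirk info.
 */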
int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
		    sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo,
			    sizeof(devinfo));
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device,
		    pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\002\001multifn\002singlefn\003skipfunc0"
			    "\004skipfunc1\005skipfunc2\006skipfunc3"
			    "\007skipfunc4\010skipfunc5\011skipfunc6"
			    "\012skipfunc7",
			    devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

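/*
 * Probe a single device/function: read its configuration header, build
 * the pci_attach_args, and either hand it to the caller's match function
 * or attach a child driver via config_found_sm_loc().
 */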
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;
	int locs[PCICF_NLOCS];
	struct device *subdev;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function) && !match)
		return (0);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = 	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		locs[PCICF_DEV] = device;
		locs[PCICF_FUNCTION] = function;

		subdev = config_found_sm_loc(&sc->sc_dev, "pci", locs, &pa,
		    pciprint, config_stdsubmatch);
		sc->PCI_SC_DEVICESC(device, function) = subdev;
		ret = (subdev != NULL);
	}

	return (ret);
}

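/*
 * Child detach hook: forget the device in the per-bus device array so
 * that the slot can be probed again on rescan.
 */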
static void
pcidevdetached(struct device *sc, struct device *dev)
{
	struct pci_softc *psc = (struct pci_softc *)sc;
	int d, f;

	d = device_locator(dev, PCICF_DEV);
	f = device_locator(dev, PCICF_FUNCTION);

	KASSERT(psc->PCI_SC_DEVICESC(d, f) == dev);

	psc->PCI_SC_DEVICESC(d, f) = 0;
}

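/*
 * Walk the capability list looking for "capid"; if found, optionally
 * return its offset in configuration space and the capability register.
 */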
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1:	/* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

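/*
 * Search all attached PCI busses for a device accepted by the supplied
 * match function; the winning attach args are copied into *pa.
 */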
int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;
	static const int wildcard[2] = {
		PCICF_DEV_DEFAULT,
		PCICF_FUNCTION_DEFAULT
	};

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev, wildcard,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			if ((locators[PCICF_FUNCTION] !=
			    PCICF_FUNCTION_DEFAULT) &&
			    (locators[PCICF_FUNCTION] != function))
				continue;

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */


/*
 * Vital Product Data (PCI 2.2)
 */

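/*
 * Read "count" 32-bit words of Vital Product Data starting at "offset",
 * polling the VPD address register for completion of each transfer.
 */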
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

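/*
 * Write "count" 32-bit words of Vital Product Data starting at "offset",
 * polling until the device clears the operation flag for each word.
 */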
int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

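/*
 * Return non-zero if the bus provides a valid 64-bit DMA tag and the
 * machine has more than 4GB of physical memory.
 */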
int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
		return 1;
#endif
	return 0;
}

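/*
 * Save the first 16 configuration registers (the standard header) so
 * they can be restored after a power transition.
 */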
void
pci_conf_capture(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pcs->reg[off] = pci_conf_read(pc, tag, (off * 4));

	return;
}

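/*
 * Restore a previously captured configuration header, writing registers
 * in reverse order and only when they differ from the saved values.
 */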
void
pci_conf_restore(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;
	pcireg_t val;

	for (off = 15; off >= 0; off--) {
		val = pci_conf_read(pc, tag, (off * 4));
		if (val != pcs->reg[off])
			pci_conf_write(pc, tag, (off * 4), pcs->reg[off]);
	}

	return;
}

/*
 * Power Management Capability (Rev 2.2)
 */
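/*
 * Read the device's current power state (D0-D3) from the PMCSR register
 * of the power management capability, if it has one.
 */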
int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t *state)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	cap = value >> PCI_PMCR_SHIFT;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	switch (now) {
	case PCI_PMCSR_STATE_D0:
	case PCI_PMCSR_STATE_D1:
	case PCI_PMCSR_STATE_D2:
	case PCI_PMCSR_STATE_D3:
		*state = now;
		return 0;
	default:
		return EINVAL;
	}
}

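/*
 * Program the PMCSR register to move the device into the requested
 * power state, rejecting transitions the capability does not allow.
 */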
int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t state)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	cap = value >> PCI_PMCR_SHIFT;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;

	if (now == state)
		return 0;
	switch (state) {
	case PCI_PMCSR_STATE_D0:
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PMCSR_STATE_D1:
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D1SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PMCSR_STATE_D2:
		if (now == PCI_PMCSR_STATE_D3)
			return EINVAL;
		if (!(cap & PCI_PMCR_D2SUPP))
			return EOPNOTSUPP;
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PMCSR_STATE_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return 0;
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return EINVAL;
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);
	return 0;
}

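/*
 * Translate a PCI PMCSR power state value into the corresponding
 * machine-independent pnp_state_t.
 */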
pnp_state_t
pci_pnp_powerstate(pcireg_t reg)
{
	pnp_state_t state;

	switch (reg) {
	case PCI_PMCSR_STATE_D0:
		state = PNP_STATE_D0;
		break;
	case PCI_PMCSR_STATE_D1:
		state = PNP_STATE_D1;
		break;
	case PCI_PMCSR_STATE_D2:
		state = PNP_STATE_D2;
		break;
	case PCI_PMCSR_STATE_D3:
		state = PNP_STATE_D3;
		break;
	default:
		state = PNP_STATE_UNKNOWN;
		break;
	}

	return state;
}

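/*
 * Translate a power management capability register into the set of
 * pnp_state_t states the device supports; D0 and D3 are always included.
 */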
pnp_state_t
pci_pnp_capabilities(pcireg_t reg)
{
	pnp_state_t state;
	pcireg_t cap;

	cap = reg >> PCI_PMCR_SHIFT;

	state = PNP_STATE_D0 | PNP_STATE_D3;
	if (cap & PCI_PMCR_D1SUPP)
		state |= PNP_STATE_D1;
	if (cap & PCI_PMCR_D2SUPP)
		state |= PNP_STATE_D2;

	return state;
}

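/*
 * Wake a device up to D0 if it is in a lower power state, invoking the
 * driver's wake function (if any) to deal with state lost while asleep.
 */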
int
pci_activate(pci_chipset_tag_t pc, pcitag_t tag, void *sc,
    int (*wakefun)(pci_chipset_tag_t, pcitag_t, void *, pcireg_t))
{
	struct device *dv = sc;
	pcireg_t pmode;
	int error;

	if ((error = pci_get_powerstate(pc, tag, &pmode)))
		return error;

	switch (pmode) {
	case PCI_PMCSR_STATE_D0:
		break;
	case PCI_PMCSR_STATE_D3:
		if (wakefun == NULL) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error(
			    "%s: unable to wake up from power state D3\n",
			    dv->dv_xname);
			return EOPNOTSUPP;
		}
		/*FALLTHROUGH*/
	default:
		if (wakefun) {
			error = (*wakefun)(pc, tag, sc, pmode);
			if (error)
				return error;
		}
		aprint_normal("%s: waking up from power state D%d\n",
		    dv->dv_xname, pmode);
		if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
			return error;
	}
	return 0;
}

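/*
 * Null wake function for drivers that need no help restoring state.
 */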
int
pci_activate_null(pci_chipset_tag_t pc, pcitag_t tag,
    void *sc, pcireg_t state)
{
	return 0;
}

CFATTACH_DECL2(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL, pcirescan, pcidevdetached);