/*	$NetBSD: pci.c,v 1.103.22.7 2007/10/01 05:37:50 joerg Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *	Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.103.22.7 2007/10/01 05:37:50 joerg Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int pciprint(void *, const char *);

#ifdef PCI_MACHDEP_ENUMERATE_BUS
#define pci_enumerate_bus PCI_MACHDEP_ENUMERATE_BUS
#else
int pci_enumerate_bus(struct pci_softc *, const int *,
    int (*)(struct pci_attach_args *), struct pci_attach_args *);
#endif

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */
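
/*
 * A minimal sketch (not part of this file) of how a PCI-ISA bridge
 * driver can use config_defer() as described above; the "xbr" names
 * are hypothetical:
 *
 *	static void
 *	xbr_attach(struct device *parent, struct device *self, void *aux)
 *	{
 *		...save whatever is needed from the attach args...
 *		config_defer(self, xbr_deferred_attach);
 *	}
 *
 *	static void
 *	xbr_deferred_attach(struct device *self)
 *	{
 *		struct isabus_attach_args iba;
 *
 *		...fill in iba...
 *		config_found_ia(self, "isabus", &iba, isabusprint);
 *	}
 *
 * The deferred callback runs only after PCI bus 0 has been fully
 * enumerated, which is exactly the property the note above relies on.
 */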

static int
pcirescan(struct device *sc, const char *ifattr, const int *locators)
{

	KASSERT(ifattr && !strcmp(ifattr, "pci"));
	KASSERT(locators);

	pci_enumerate_bus((struct pci_softc *)sc, locators, NULL, NULL);
	return (0);
}

static int
pcimatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pcibus_attach_args *pba = aux;

	/* Check the locators */
	if (cf->cf_loc[PCIBUSCF_BUS] != PCIBUSCF_BUS_DEFAULT &&
	    cf->cf_loc[PCIBUSCF_BUS] != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

static void
pciattach(struct device *parent, struct device *self, void *aux)
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";
	static const int wildcard[PCICF_NLOCS] = {
		PCICF_DEV_DEFAULT, PCICF_FUNCTION_DEFAULT
	};

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str) \
do { \
	aprint_normal("%s%s", sep, str); \
	sep = ", "; \
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;

	pcirescan(&sc->sc_dev, "pci", wildcard);

	(void)pnp_register(self, pnp_generic_power);
}

static int
pcidetach(struct device *self, int flags)
{
	pnp_deregister(self);
	return 0;
}

int
pciprint(void *aux, const char *pnp)
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo, sizeof(devinfo));
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\002\001multifn\002singlefn\003skipfunc0"
			    "\004skipfunc1\005skipfunc2\006skipfunc3"
			    "\007skipfunc4\010skipfunc5\011skipfunc6"
			    "\012skipfunc7",
			    devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;
	int locs[PCICF_NLOCS];
	struct device *subdev;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function) && !match)
		return (0);

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return (0);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
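		/*
		 * Worked example of the swizzle below (illustrative):
		 * a function at device 3 directly behind a single
		 * PCI-PCI bridge gets pa_intrswiz == 3, so its INTB#
		 * (pin 2) maps to ((2 + 3 - 1) % 4) + 1 == 1, i.e.
		 * INTA# as seen on the parent bus, which is the
		 * standard bridge swizzle.
		 */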
		pa.pa_intrpin = /* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		locs[PCICF_DEV] = device;
		locs[PCICF_FUNCTION] = function;

		subdev = config_found_sm_loc(&sc->sc_dev, "pci", locs, &pa,
		    pciprint, config_stdsubmatch);
		sc->PCI_SC_DEVICESC(device, function) = subdev;
		ret = (subdev != NULL);
	}

	return (ret);
}

static void
pcidevdetached(struct device *sc, struct device *dev)
{
	struct pci_softc *psc = (struct pci_softc *)sc;
	int d, f;

	d = device_locator(dev, PCICF_DEV);
	f = device_locator(dev, PCICF_FUNCTION);

	KASSERT(psc->PCI_SC_DEVICESC(d, f) == dev);

	psc->PCI_SC_DEVICESC(d, f) = 0;
}

CFATTACH_DECL2(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, pcidetach, NULL, pcirescan, pcidevdetached);

int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
	case 1:	/* PCI-PCI bridge header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}
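
/*
 * Example use of pci_get_capability() (a sketch, not called from this
 * file): locating a function's power management capability and reading
 * the first dword of it.
 *
 *	int off;
 *	pcireg_t reg;
 *
 *	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &off, &reg)) {
 *		...the capability header is now in "reg"; the PMCSR
 *		   lives at off + PCI_PMCSR...
 *	}
 *
 * pci_get_powerstate() later in this file does exactly this.
 */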

int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;
	static const int wildcard[2] = {
		PCICF_DEV_DEFAULT,
		PCICF_FUNCTION_DEFAULT
	};

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev, wildcard,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}
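
/*
 * Example use of pci_find_device() (a sketch; the match criterion is
 * purely illustrative):
 *
 *	static int
 *	my_match(struct pci_attach_args *pa)
 *	{
 *
 *		return (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY);
 *	}
 *
 *	struct pci_attach_args pa;
 *
 *	if (pci_find_device(&pa, my_match))
 *		...pa now describes the first display controller found...
 */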

#ifndef PCI_MACHDEP_ENUMERATE_BUS
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

		for (function = 0; function < nfunctions; function++) {
			if ((locators[PCICF_FUNCTION] != PCICF_FUNCTION_DEFAULT)
			    && (locators[PCICF_FUNCTION] != function))
				continue;

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */


/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}
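
/*
 * Example use of pci_vpd_read() (a sketch): fetching the first four
 * 32-bit words of a device's VPD area.
 *
 *	pcireg_t vpd[4];
 *
 *	if (pci_vpd_read(pc, tag, 0, 4, vpd) != 0)
 *		...the device has no VPD capability or did not respond...
 */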

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
		return 1;
#endif
	return 0;
}
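
/*
 * Example use of pci_dma64_available() (a sketch, with a hypothetical
 * softc member): pick the widest DMA tag the bus and machine support.
 *
 *	sc->sc_dmat = pci_dma64_available(pa) ? pa->pa_dmat64 : pa->pa_dmat;
 */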

void
pci_conf_capture(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;

	for (off = 0; off < 16; off++)
		pcs->reg[off] = pci_conf_read(pc, tag, (off * 4));

	return;
}

void
pci_conf_restore(pci_chipset_tag_t pc, pcitag_t tag,
    struct pci_conf_state *pcs)
{
	int off;
	pcireg_t val;

	for (off = 15; off >= 0; off--) {
		val = pci_conf_read(pc, tag, (off * 4));
		if (val != pcs->reg[off])
			pci_conf_write(pc, tag, (off * 4), pcs->reg[off]);
	}

	return;
}
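
/*
 * Example (a sketch): pci_conf_capture()/pci_conf_restore() are meant
 * to bracket a transition through a low power state, as the generic
 * power code later in this file does:
 *
 *	struct pci_conf_state pcs;
 *
 *	pci_conf_capture(pc, tag, &pcs);
 *	...put the device into D3 and later bring it back to D0...
 *	pci_conf_restore(pc, tag, &pcs);
 */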

/*
 * Power Management Capability (Rev 2.2)
 */
static int
pci_get_powerstate_int(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t *state,
    int offset)
{
	pcireg_t value, now;

	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	switch (now) {
	case PCI_PMCSR_STATE_D0:
	case PCI_PMCSR_STATE_D1:
	case PCI_PMCSR_STATE_D2:
	case PCI_PMCSR_STATE_D3:
		*state = now;
		return 0;
	default:
		return EINVAL;
	}
}

int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t *state)
{
	int offset;
	pcireg_t value;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return EOPNOTSUPP;

	return pci_get_powerstate_int(pc, tag, state, offset);
}

static int
pci_set_powerstate_int(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t state,
    int offset, pcireg_t cap_reg)
{
	pcireg_t value, cap, now;

	cap = cap_reg >> PCI_PMCR_SHIFT;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;

	if (now == state)
		return 0;
	switch (state) {
	case PCI_PMCSR_STATE_D0:
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PMCSR_STATE_D1:
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3) {
			printf("invalid transition from %d to D1\n", (int)now);
			return EINVAL;
		}
		if (!(cap & PCI_PMCR_D1SUPP)) {
			printf("D1 not supported\n");
			return EOPNOTSUPP;
		}
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PMCSR_STATE_D2:
		if (now == PCI_PMCSR_STATE_D3) {
			printf("invalid transition from %d to D2\n", (int)now);
			return EINVAL;
		}
		if (!(cap & PCI_PMCR_D2SUPP)) {
			printf("D2 not supported\n");
			return EOPNOTSUPP;
		}
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PMCSR_STATE_D3:
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return EINVAL;
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);
	return 0;
}

int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, pcireg_t state)
{
	int offset;
	pcireg_t value;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value)) {
		printf("pci_set_powerstate not supported\n");
		return EOPNOTSUPP;
	}

	return pci_set_powerstate_int(pc, tag, state, offset, value);
}
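
/*
 * Example use of pci_set_powerstate() (a sketch; error handling elided):
 *
 *	(void)pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D3);
 *	...device is now in D3hot and will lose configuration data...
 *	(void)pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0);
 */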

pnp_state_t
pci_pnp_powerstate(pcireg_t reg)
{
	pnp_state_t state;

	switch (reg) {
	case PCI_PMCSR_STATE_D0:
		state = PNP_STATE_D0;
		break;
	case PCI_PMCSR_STATE_D1:
		state = PNP_STATE_D1;
		break;
	case PCI_PMCSR_STATE_D2:
		state = PNP_STATE_D2;
		break;
	case PCI_PMCSR_STATE_D3:
		state = PNP_STATE_D3;
		break;
	default:
		state = PNP_STATE_UNKNOWN;
		break;
	}

	return state;
}

pnp_state_t
pci_pnp_capabilities(pcireg_t reg)
{
	pnp_state_t state;
	pcireg_t cap;

	cap = reg >> PCI_PMCR_SHIFT;

	state = PNP_STATE_D0 | PNP_STATE_D3;
	if (cap & PCI_PMCR_D1SUPP)
		state |= PNP_STATE_D1;
	if (cap & PCI_PMCR_D2SUPP)
		state |= PNP_STATE_D2;

	return state;
}

int
pci_activate(pci_chipset_tag_t pc, pcitag_t tag, void *sc,
    int (*wakefun)(pci_chipset_tag_t, pcitag_t, void *, pcireg_t))
{
	struct device *dv = sc;
	pcireg_t pmode;
	int error;

	if ((error = pci_get_powerstate(pc, tag, &pmode)))
		return error;

	switch (pmode) {
	case PCI_PMCSR_STATE_D0:
		break;
	case PCI_PMCSR_STATE_D3:
		if (wakefun == NULL) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error(
			    "%s: unable to wake up from power state D3\n",
			    dv->dv_xname);
			return EOPNOTSUPP;
		}
		/*FALLTHROUGH*/
	default:
		if (wakefun) {
			error = (*wakefun)(pc, tag, sc, pmode);
			if (error)
				return error;
		}
		aprint_normal("%s: waking up from power state D%d\n",
		    dv->dv_xname, pmode);
		if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
			return error;
	}
	return 0;
}
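
/*
 * Example use of pci_activate() (a sketch): a driver waking its device
 * at attach time.  "my_restore" is a hypothetical wake function that
 * reprograms registers lost in D1/D2; drivers with no such state can
 * pass pci_activate_null below instead.
 *
 *	static int
 *	my_restore(pci_chipset_tag_t pc, pcitag_t tag, void *self,
 *	    pcireg_t state)
 *	{
 *		...reload device-specific registers...
 *		return 0;
 *	}
 *
 *	if (pci_activate(pa->pa_pc, pa->pa_tag, self, my_restore) != 0)
 *		...the device is stuck in D3 and cannot be used...
 */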

int
pci_activate_null(pci_chipset_tag_t pc, pcitag_t tag,
    void *sc, pcireg_t state)
{
	return 0;
}

void
pci_disable_retry(pci_chipset_tag_t pc, pcitag_t tag)
{
	pcireg_t retry;

	/*
	 * Disable retry timeout to keep PCI Tx retries from
	 * interfering with ACPI C3 CPU state.
	 */
	retry = pci_conf_read(pc, tag, PCI_RETRY_TIMEOUT_REG);
	retry &= ~PCI_RETRY_TIMEOUT_REG_MASK;
	pci_conf_write(pc, tag, PCI_RETRY_TIMEOUT_REG, retry);
}

struct pci_generic_power {
	struct pci_conf_state p_pciconf;
	pci_chipset_tag_t p_pc;
	pcitag_t p_tag;
	bool p_has_pm;
	int p_pm_offset;
	pnp_state_t p_pm_states;
	pcireg_t p_pm_cap;
	void (*p_resume)(device_t);
	void (*p_suspend)(device_t);
};

struct pci_net_generic_power {
	struct pci_generic_power p_generic;
	struct ifnet *p_ifp;
	void (*p_resume)(device_t);
	void (*p_suspend)(device_t);
};

static pnp_status_t
pci_generic_power(device_t dv, pnp_request_t req, void *opaque)
{
	struct pci_generic_power *arg = device_power_private(dv);
	pnp_status_t status;
	pnp_state_t *state;
	pnp_capabilities_t *caps;
	pcireg_t val;

	status = PNP_STATUS_UNSUPPORTED;

	switch (req) {
	case PNP_REQUEST_GET_CAPABILITIES:
		caps = opaque;

		caps->state = arg->p_pm_states;
		status = PNP_STATUS_SUCCESS;
		break;
	case PNP_REQUEST_SET_STATE:
		state = opaque;
		switch (*state) {
		case PNP_STATE_D0:
			val = PCI_PMCSR_STATE_D0;
			break;
		case PNP_STATE_D3:
			val = PCI_PMCSR_STATE_D3;
			if (arg->p_suspend)
				(*arg->p_suspend)(dv);
			pci_conf_capture(arg->p_pc, arg->p_tag,
			    &arg->p_pciconf);
			break;
		default:
			return PNP_STATUS_UNSUPPORTED;
		}

		if (arg->p_has_pm &&
		    pci_set_powerstate_int(arg->p_pc, arg->p_tag, val,
		    arg->p_pm_offset, arg->p_pm_cap)) {
			aprint_error("%s: unsupported state, continuing.\n",
			    device_xname(dv));
		}

		if (*state == PNP_STATE_D0) {
			pci_conf_restore(arg->p_pc, arg->p_tag,
			    &arg->p_pciconf);
			if (arg->p_resume)
				(*arg->p_resume)(dv);
		}
		status = PNP_STATUS_SUCCESS;
		break;

	case PNP_REQUEST_GET_STATE:
		state = opaque;
		if (arg->p_has_pm &&
		    pci_get_powerstate_int(arg->p_pc, arg->p_tag, &val,
		    arg->p_pm_offset) == 0)
			*state = pci_pnp_powerstate(val);
		else
			*state = PNP_STATE_D0;
		status = PNP_STATUS_SUCCESS;
		break;
	default:
		status = PNP_STATUS_UNSUPPORTED;
	}

	return status;
}

static pnp_status_t
pci_generic_power_register_internal(device_t dv,
    pci_chipset_tag_t p_pc, pcitag_t p_tag,
    void (*p_suspend)(device_t), void (*p_resume)(device_t))
{
	struct pci_generic_power *arg = device_power_private(dv);
	pcireg_t reg;
	int off;

	arg->p_pc = p_pc;
	arg->p_tag = p_tag;
	arg->p_resume = p_resume;
	arg->p_suspend = p_suspend;

	if (pci_get_capability(p_pc, p_tag, PCI_CAP_PWRMGMT, &off, &reg)) {
		arg->p_pm_states = pci_pnp_capabilities(reg);
		arg->p_has_pm = true;
		arg->p_pm_offset = off;
		arg->p_pm_cap = reg;
	} else {
		arg->p_pm_states = PNP_STATE_D0 | PNP_STATE_D3;
		arg->p_has_pm = false;
		arg->p_pm_offset = -1;
	}

	return pnp_register(dv, pci_generic_power);
}

static void
pci_generic_power_deregister_internal(device_t dv)
{
	pnp_deregister(dv);
}

pnp_status_t
pci_generic_power_register(device_t dv,
    pci_chipset_tag_t p_pc, pcitag_t p_tag,
    void (*p_suspend)(device_t), void (*p_resume)(device_t))
{
	struct pci_generic_power *arg;
	pnp_status_t status;

	arg = malloc(sizeof(*arg), M_DEVBUF, M_ZERO | M_WAITOK);
	device_power_set_private(dv, arg);

	status = pci_generic_power_register_internal(dv, p_pc, p_tag,
	    p_suspend, p_resume);
	if (status != PNP_STATUS_SUCCESS) {
		free(arg, M_DEVBUF);
		device_power_set_private(dv, NULL);
	}
	return status;
}
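
/*
 * Example use (a sketch, hypothetical driver code): a driver hooks into
 * the generic PCI power handler at the end of its attach routine, with
 * optional suspend/resume callbacks for device-private state:
 *
 *	if (pci_generic_power_register(self, pa->pa_pc, pa->pa_tag,
 *	    my_suspend, my_resume) != PNP_STATUS_SUCCESS)
 *		aprint_error("%s: couldn't register power handler\n",
 *		    device_xname(self));
 *
 * and calls pci_generic_power_deregister(self) from its detach routine.
 */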

void
pci_generic_power_deregister(device_t dv)
{
	struct pci_generic_power *arg = device_power_private(dv);

	if (arg == NULL)
		return;

	pci_generic_power_deregister_internal(dv);
	free(arg, M_DEVBUF);
}

static void
pci_net_generic_power_resume(device_t dv)
{
	struct pci_net_generic_power *arg = device_power_private(dv);
	struct ifnet *ifp = arg->p_ifp;
	int s;

	if (arg->p_resume)
		(*arg->p_resume)(dv);

	s = splnet();
	if (ifp->if_flags & IFF_UP) {
		ifp->if_flags &= ~IFF_RUNNING;
		(*ifp->if_init)(ifp);
		(*ifp->if_start)(ifp);
	}
	splx(s);
}

static void
pci_net_generic_power_suspend(device_t dv)
{
	struct pci_net_generic_power *arg = device_power_private(dv);
	struct ifnet *ifp = arg->p_ifp;
	int s;

	s = splnet();
	(*ifp->if_stop)(ifp, 1);
	splx(s);

	if (arg->p_suspend)
		(*arg->p_suspend)(dv);
}

pnp_status_t
pci_net_generic_power_register(device_t dv,
    pci_chipset_tag_t p_pc, pcitag_t p_tag, struct ifnet *p_ifp,
    void (*p_suspend)(device_t), void (*p_resume)(device_t))
{
	struct pci_net_generic_power *arg;
	pnp_status_t status;

	arg = malloc(sizeof(*arg), M_DEVBUF, M_ZERO | M_WAITOK);
	arg->p_resume = p_resume;
	arg->p_suspend = p_suspend;
	arg->p_ifp = p_ifp;
	device_power_set_private(dv, arg);

	status = pci_generic_power_register_internal(dv, p_pc, p_tag,
	    pci_net_generic_power_suspend, pci_net_generic_power_resume);
	if (status != PNP_STATUS_SUCCESS) {
		free(arg, M_DEVBUF);
		device_power_set_private(dv, NULL);
	}
	return status;
}
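
/*
 * Example use (a sketch; the softc layout is hypothetical): a network
 * driver registers the generic handler, which stops the interface on
 * suspend and re-runs if_init/if_start on resume.  The NULL arguments
 * are the optional chip-specific suspend/resume hooks.
 *
 *	if (pci_net_generic_power_register(self, pa->pa_pc, pa->pa_tag,
 *	    &sc->sc_ethercom.ec_if, NULL, NULL) != PNP_STATUS_SUCCESS)
 *		aprint_error("%s: couldn't register power handler\n",
 *		    device_xname(self));
 */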

void
pci_net_generic_power_deregister(device_t dv)
{
	struct pci_net_generic_power *arg = device_power_private(dv);

	if (arg == NULL)
		return;

	pci_generic_power_deregister_internal(dv);
	free(arg, M_DEVBUF);
}