/*	$NetBSD: pci.c,v 1.79 2003/05/03 18:02:37 thorpej Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *	Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.79 2003/05/03 18:02:37 thorpej Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int	pcimatch __P((struct device *, struct cfdata *, void *));
void	pciattach __P((struct device *, struct device *, void *));

CFATTACH_DECL(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL);

int	pciprint __P((void *, const char *));
int	pcisubmatch __P((struct device *, struct cfdata *, void *));

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */
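
/*
 * Illustrative sketch only: an ISA/EISA bridge driver's attach routine
 * would typically defer attaching its child bus roughly like this
 * (pcibattach and pcib_callback are example names, not defined here):
 *
 *	void
 *	pcibattach(struct device *parent, struct device *self, void *aux)
 *	{
 *		...
 *		config_defer(self, pcib_callback);
 *	}
 *
 * config_defer() arranges for pcib_callback() to run only after the
 * parent bus (PCI bus 0) has finished attaching all of its subdevices.
 */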

int
pcimatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;

	if (strcmp(pba->pba_busname, cf->cf_name))
		return (0);

	/* Check the locators */
	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
	    cf->pcibuscf_bus != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

void
pciattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str)							\
do {									\
	aprint_normal("%s%s", sep, str);				\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;
	pci_enumerate_bus(sc, NULL, NULL);
}

int
pciprint(aux, pnp)
	void *aux;
	const char *pnp;
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo);
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo);
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\20\1multifn", devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

int
pcisubmatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (cf->pcicf_dev != PCI_UNK_DEV &&
	    cf->pcicf_dev != pa->pa_device)
		return (0);
	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
	    cf->pcicf_function != pa->pa_function)
		return (0);
	return (config_match(parent, cf, aux));
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);
	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}
	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin =	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
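	/*
	 * For example, a function reporting INTB# (pin 2) in device 3
	 * with an accumulated pa_intrswiz of 3 ends up with
	 * pa_intrpin = ((2 + 3 - 1) % 4) + 1 == 1, i.e. its interrupt
	 * is routed as INTA# relative to pa_intrtag.
	 */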
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		ret = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch) != NULL;
	}

	return (ret);
}

int
pci_get_capability(pc, tag, capid, offset, value)
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int capid;
	int *offset;
	pcireg_t *value;
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

int
pci_find_device(struct pci_attach_args *pa,
    int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *)pcidev,
		    match, pa) != 0)
			return (1);
	}
	return (0);
}
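
/*
 * Illustrative use of pci_find_device(): a caller can locate a device
 * by vendor/product with a match callback, e.g. (MYDEV_VENDOR and
 * MYDEV_PRODUCT are placeholder constants, not defined anywhere):
 *
 *	static int
 *	mydev_match(struct pci_attach_args *pa)
 *	{
 *
 *		return (PCI_VENDOR(pa->pa_id) == MYDEV_VENDOR &&
 *		    PCI_PRODUCT(pa->pa_id) == MYDEV_PRODUCT);
 *	}
 *
 *	struct pci_attach_args pa;
 *
 *	if (pci_find_device(&pa, mydev_match))
 *		... pa now describes the first matching function ...
 */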

/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus_generic(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);
		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
		    (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
			nfunctions = 8;
		else
			nfunctions = 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}

/*
 * Power Management Capability (Rev 2.2)
 */

int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int newstate)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return (EOPNOTSUPP);

	cap = value >> 16;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;
	switch (newstate) {
	case PCI_PWR_D0:
		if (now == PCI_PMCSR_STATE_D0)
			return (0);
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PWR_D1:
		if (now == PCI_PMCSR_STATE_D1)
			return (0);
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return (EINVAL);
		if (!(cap & PCI_PMCR_D1SUPP))
			return (EOPNOTSUPP);
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PWR_D2:
		if (now == PCI_PMCSR_STATE_D2)
			return (0);
		if (now == PCI_PMCSR_STATE_D3)
			return (EINVAL);
		if (!(cap & PCI_PMCR_D2SUPP))
			return (EOPNOTSUPP);
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PWR_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return (0);
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return (EINVAL);
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);

	return (0);
}

int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
{
	int offset;
	pcireg_t value;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return (PCI_PWR_D0);
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	value &= PCI_PMCSR_STATE_MASK;
	switch (value) {
	case PCI_PMCSR_STATE_D0:
		return (PCI_PWR_D0);
	case PCI_PMCSR_STATE_D1:
		return (PCI_PWR_D1);
	case PCI_PMCSR_STATE_D2:
		return (PCI_PWR_D2);
	case PCI_PMCSR_STATE_D3:
		return (PCI_PWR_D3);
	}

	return (PCI_PWR_D0);
}
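
/*
 * Illustrative use: a driver bringing its device back to full power
 * before touching it might do:
 *
 *	if (pci_get_powerstate(pc, tag) != PCI_PWR_D0)
 *		(void) pci_set_powerstate(pc, tag, PCI_PWR_D0);
 *
 * pci_set_powerstate() allows a transition to D0 from any state; other
 * "upward" transitions (e.g. D3 to D1) return EINVAL, and EOPNOTSUPP
 * is returned if the device lacks the power management capability or
 * does not support the requested state.
 */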

/*
 * Vital Product Data (PCI 2.2)
 */

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}
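
/*
 * Illustrative use: reading the first four 32-bit words of a device's
 * VPD from a driver that has kept its pci_attach_args around:
 *
 *	pcireg_t vpd[4];
 *
 *	if (pci_vpd_read(pa->pa_pc, pa->pa_tag, 0, 4, vpd) != 0)
 *		... VPD absent or the read timed out ...
 *
 * Both pci_vpd_read() and pci_vpd_write() return 0 on success and 1
 * if the VPD capability is absent or the operation does not complete.
 */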

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}