1 /* $NetBSD: pciconf.c,v 1.49 2020/10/10 15:22:15 jmcneill Exp $ */
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Allen Briggs for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37 /*
38 * Derived in part from code from PMON/2000 (http://pmon.groupbsd.org/).
39 */
40
41 /*
42 * To do:
43 * - Perform all data structure allocation dynamically, don't have
44 * statically-sized arrays ("oops, you lose because you have too
45 * many slots filled!")
46 * - Do this in 2 passes, with an MD hook to control the behavior:
47 * (1) Configure the bus (possibly including expansion
48 * ROMs).
49 * (2) Another pass to disable expansion ROMs if they're
50 * mapped (since you're not supposed to leave them
51 * mapped when you're not using them).
52 * This would facilitate MD code executing the expansion ROMs
53 * if necessary (possibly with an x86 emulator) to configure
54 * devices (e.g. VGA cards).
55 * - Deal with "anything can be hot-plugged" -- i.e., carry configuration
56 * information around & be able to reconfigure on the fly
57 * - Deal with segments (See IA64 System Abstraction Layer)
58 * - Deal with subtractive bridges (& non-spec positive/subtractive decode)
59 * - Deal with ISA/VGA/VGA palette snooping
60 * - Deal with device capabilities on bridges
61 * - Worry about changing a bridge to/from transparency
62 * From thorpej (05/25/01)
63 * - Try to handle devices that are already configured (perhaps using that
64 * as a hint to where we put other devices)
65 */
66
67 #include <sys/cdefs.h>
68 __KERNEL_RCSID(0, "$NetBSD: pciconf.c,v 1.49 2020/10/10 15:22:15 jmcneill Exp $");
69
70 #include "opt_pci.h"
71
72 #include <sys/param.h>
73 #include <sys/queue.h>
74 #include <sys/systm.h>
75 #include <sys/malloc.h>
76 #include <sys/kmem.h>
77 #include <sys/vmem.h>
78
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pciconf.h>
81 #include <dev/pci/pcidevs.h>
82 #include <dev/pci/pccbbreg.h>
83
84 int pci_conf_debug = 0;
85
86 #if !defined(MIN)
87 #define MIN(a,b) (((a)<(b))?(a):(b))
88 #define MAX(a,b) (((a)>(b))?(a):(b))
89 #endif
90
91 /* per-bus constants. */
92 #define MAX_CONF_DEV 32 /* Arbitrary */
93 #define MAX_CONF_MEM (3 * MAX_CONF_DEV) /* Avg. 3 per device -- Arb. */
94 #define MAX_CONF_IO (3 * MAX_CONF_DEV) /* Avg. 1 per device -- Arb. */
95
96 struct _s_pciconf_bus_t; /* Forward declaration */
97
98 struct pciconf_resource {
99 vmem_t *arena;
100 bus_addr_t min_addr;
101 bus_addr_t max_addr;
102 bus_size_t total_size;
103 };
104
105 #define PCICONF_RESOURCE_NTYPES 3
106 CTASSERT(PCICONF_RESOURCE_IO < PCICONF_RESOURCE_NTYPES);
107 CTASSERT(PCICONF_RESOURCE_MEM < PCICONF_RESOURCE_NTYPES);
108 CTASSERT(PCICONF_RESOURCE_PREFETCHABLE_MEM < PCICONF_RESOURCE_NTYPES);
109
110 static const char *pciconf_resource_names[] = {
111 [PCICONF_RESOURCE_IO] = "pci-io",
112 [PCICONF_RESOURCE_MEM] = "pci-mem",
113 [PCICONF_RESOURCE_PREFETCHABLE_MEM] = "pci-pmem",
114 };
115
116 struct pciconf_resources {
117 struct pciconf_resource resources[PCICONF_RESOURCE_NTYPES];
118 };
119
120 struct pciconf_resource_rsvd {
121 int type;
122 uint64_t start;
123 bus_size_t size;
124 LIST_ENTRY(pciconf_resource_rsvd) next;
125 };
126
127 static LIST_HEAD(, pciconf_resource_rsvd) pciconf_resource_reservations =
128 LIST_HEAD_INITIALIZER(pciconf_resource_reservations);
129
130 typedef struct _s_pciconf_dev_t {
131 int ipin;
132 int iline;
133 int min_gnt;
134 int max_lat;
135 int enable;
136 pcitag_t tag;
137 pci_chipset_tag_t pc;
138 struct _s_pciconf_bus_t *ppb; /* I am really a bridge */
139 } pciconf_dev_t;
140
141 typedef struct _s_pciconf_win_t {
142 pciconf_dev_t *dev;
143 int reg; /* 0 for busses */
144 int align;
145 int prefetch;
146 uint64_t size;
147 uint64_t address;
148 } pciconf_win_t;
149
150 typedef struct _s_pciconf_bus_t {
151 int busno;
152 int next_busno;
153 int last_busno;
154 int max_mingnt;
155 int min_maxlat;
156 int cacheline_size;
157 int prefetch;
158 int fast_b2b;
159 int freq_66;
160 int def_ltim;
161 int max_ltim;
162 int bandwidth_used;
163 int swiz;
164 int io_32bit;
165 int pmem_64bit;
166 int mem_64bit;
167 int io_align;
168 int mem_align;
169 int pmem_align;
170
171 int ndevs;
172 pciconf_dev_t device[MAX_CONF_DEV];
173
174 /* These should be sorted in order of decreasing size */
175 int nmemwin;
176 pciconf_win_t pcimemwin[MAX_CONF_MEM];
177 int niowin;
178 pciconf_win_t pciiowin[MAX_CONF_IO];
179
180 bus_size_t io_total;
181 bus_size_t mem_total;
182 bus_size_t pmem_total;
183
184 struct pciconf_resource io_res;
185 struct pciconf_resource mem_res;
186 struct pciconf_resource pmem_res;
187
188 pci_chipset_tag_t pc;
189 struct _s_pciconf_bus_t *parent_bus;
190 } pciconf_bus_t;
191
192 static int probe_bus(pciconf_bus_t *);
193 static void alloc_busno(pciconf_bus_t *, pciconf_bus_t *);
194 static void set_busreg(pci_chipset_tag_t, pcitag_t, int, int, int);
195 static int pci_do_device_query(pciconf_bus_t *, pcitag_t, int, int, int);
196 static int setup_iowins(pciconf_bus_t *);
197 static int setup_memwins(pciconf_bus_t *);
198 static int configure_bridge(pciconf_dev_t *);
199 static int configure_bus(pciconf_bus_t *);
200 static uint64_t pci_allocate_range(struct pciconf_resource *, uint64_t, int,
201 bool);
202 static pciconf_win_t *get_io_desc(pciconf_bus_t *, bus_size_t);
203 static pciconf_win_t *get_mem_desc(pciconf_bus_t *, bus_size_t);
204 static pciconf_bus_t *query_bus(pciconf_bus_t *, pciconf_dev_t *, int);
205
206 static void print_tag(pci_chipset_tag_t, pcitag_t);
207
208 static vmem_t *
209 create_vmem_arena(const char *name, bus_addr_t start, bus_size_t size,
210 int flags)
211 {
212 KASSERT(start < VMEM_ADDR_MAX);
213 KASSERT(size == 0 ||
214 (VMEM_ADDR_MAX - start) >= (size - 1));
215
216 return vmem_create(name, start, size,
217 1, /*quantum*/
218 NULL, /*importfn*/
219 NULL, /*releasefn*/
220 NULL, /*source*/
221 0, /*qcache_max*/
222 flags,
223 IPL_NONE);
224 }
225
226 static int
227 init_range_resource(struct pciconf_resource *r, const char *name,
228 bus_addr_t start, bus_addr_t size)
229 {
230 r->arena = create_vmem_arena(name, start, size, VM_NOSLEEP);
231 if (r->arena == NULL)
232 return ENOMEM;
233
234 r->min_addr = start;
235 r->max_addr = start + (size - 1);
236 r->total_size = size;
237
238 return 0;
239 }
240
241 static void
242 fini_range_resource(struct pciconf_resource *r)
243 {
244 if (r->arena) {
245 vmem_xfreeall(r->arena);
246 vmem_destroy(r->arena);
247 }
248 memset(r, 0, sizeof(*r));
249 }
250
251 static void
252 print_tag(pci_chipset_tag_t pc, pcitag_t tag)
253 {
254 int bus, dev, func;
255
256 pci_decompose_tag(pc, tag, &bus, &dev, &func);
257 printf("PCI: bus %d, device %d, function %d: ", bus, dev, func);
258 }
259
260 #ifdef _LP64
261 #define __used_only_lp64 /* nothing */
262 #else
263 #define __used_only_lp64 __unused
264 #endif /* _LP64 */
265
266 /************************************************************************/
267 /************************************************************************/
268 /*********************** Bus probing routines ***********************/
269 /************************************************************************/
270 /************************************************************************/
271 static pciconf_win_t *
272 get_io_desc(pciconf_bus_t *pb, bus_size_t size)
273 {
274 int i, n;
275
276 n = pb->niowin;
277 for (i = n; i > 0 && size > pb->pciiowin[i-1].size; i--)
278 pb->pciiowin[i] = pb->pciiowin[i-1]; /* struct copy */
279 return &pb->pciiowin[i];
280 }
281
282 static pciconf_win_t *
283 get_mem_desc(pciconf_bus_t *pb, bus_size_t size)
284 {
285 int i, n;
286
287 n = pb->nmemwin;
288 for (i = n; i > 0 && size > pb->pcimemwin[i-1].size; i--)
289 pb->pcimemwin[i] = pb->pcimemwin[i-1]; /* struct copy */
290 return &pb->pcimemwin[i];
291 }
292
293 /*
294 * Set up bus common stuff, then loop over devices & functions.
295 * If we find something, call pci_do_device_query().
296 */
297 static int
298 probe_bus(pciconf_bus_t *pb)
299 {
300 int device;
301 uint8_t devs[32];
302 int i, n;
303
304 pb->ndevs = 0;
305 pb->niowin = 0;
306 pb->nmemwin = 0;
307 pb->freq_66 = 1;
308 #ifdef PCICONF_NO_FAST_B2B
309 pb->fast_b2b = 0;
310 #else
311 pb->fast_b2b = 1;
312 #endif
313 pb->prefetch = 1;
314 pb->max_mingnt = 0; /* we are looking for the maximum */
315 pb->min_maxlat = 0x100; /* we are looking for the minimum */
316 pb->bandwidth_used = 0;
317
318 n = pci_bus_devorder(pb->pc, pb->busno, devs, __arraycount(devs));
319 for (i = 0; i < n; i++) {
320 pcitag_t tag;
321 pcireg_t id, bhlcr;
322 int function, nfunction;
323 int confmode;
324
325 device = devs[i];
326
327 tag = pci_make_tag(pb->pc, pb->busno, device, 0);
328 if (pci_conf_debug) {
329 print_tag(pb->pc, tag);
330 }
331 id = pci_conf_read(pb->pc, tag, PCI_ID_REG);
332
333 if (pci_conf_debug) {
334 printf("id=%x: Vendor=%x, Product=%x\n",
335 id, PCI_VENDOR(id), PCI_PRODUCT(id));
336 }
337 /* Invalid vendor ID value? */
338 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
339 continue;
340
341 bhlcr = pci_conf_read(pb->pc, tag, PCI_BHLC_REG);
342 nfunction = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
343 for (function = 0; function < nfunction; function++) {
344 tag = pci_make_tag(pb->pc, pb->busno, device, function);
345 id = pci_conf_read(pb->pc, tag, PCI_ID_REG);
346 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
347 continue;
348 if (pb->ndevs + 1 < MAX_CONF_DEV) {
349 if (pci_conf_debug) {
350 print_tag(pb->pc, tag);
351 printf("Found dev 0x%04x 0x%04x -- "
352 "really probing.\n",
353 PCI_VENDOR(id), PCI_PRODUCT(id));
354 }
355 #ifdef __HAVE_PCI_CONF_HOOK
356 confmode = pci_conf_hook(pb->pc, pb->busno,
357 device, function, id);
358 if (confmode == 0)
359 continue;
360 #else
361 /*
362 * Don't enable expansion ROMS -- some cards
363 * share address decoders between the EXPROM
364 * and PCI memory space, and enabling the ROM
365 * when not needed will cause all sorts of
366 * lossage.
367 */
368 confmode = PCI_CONF_DEFAULT;
369 #endif
370 if (pci_do_device_query(pb, tag, device,
371 function, confmode))
372 return -1;
373 pb->ndevs++;
374 }
375 }
376 }
377 return 0;
378 }
379
380 static void
381 alloc_busno(pciconf_bus_t *parent, pciconf_bus_t *pb)
382 {
383 pb->busno = parent->next_busno;
384 pb->next_busno = pb->busno + 1;
385 }
386
387 static void
388 set_busreg(pci_chipset_tag_t pc, pcitag_t tag, int prim, int sec, int sub)
389 {
390 pcireg_t busreg;
391
392 busreg = __SHIFTIN(prim, PCI_BRIDGE_BUS_PRIMARY);
393 busreg |= __SHIFTIN(sec, PCI_BRIDGE_BUS_SECONDARY);
394 busreg |= __SHIFTIN(sub, PCI_BRIDGE_BUS_SUBORDINATE);
395 pci_conf_write(pc, tag, PCI_BRIDGE_BUS_REG, busreg);
396 }
397
398 static pciconf_bus_t *
399 query_bus(pciconf_bus_t *parent, pciconf_dev_t *pd, int dev)
400 {
401 pciconf_bus_t *pb;
402 pcireg_t io, pmem;
403 pciconf_win_t *pi, *pm;
404
405 pb = kmem_zalloc(sizeof (pciconf_bus_t), KM_SLEEP);
406 pb->cacheline_size = parent->cacheline_size;
407 pb->parent_bus = parent;
408 alloc_busno(parent, pb);
409
410 pb->mem_align = 0x100000; /* 1M alignment */
411 pb->pmem_align = 0x100000; /* 1M alignment */
412 pb->io_align = 0x1000; /* 4K alignment */
413
414 set_busreg(parent->pc, pd->tag, parent->busno, pb->busno, 0xff);
415
416 pb->swiz = parent->swiz + dev;
417
418 memset(&pb->io_res, 0, sizeof(pb->io_res));
419 memset(&pb->mem_res, 0, sizeof(pb->mem_res));
420 memset(&pb->pmem_res, 0, sizeof(pb->pmem_res));
421
422 pb->pc = parent->pc;
423 pb->io_total = pb->mem_total = pb->pmem_total = 0;
424
425 pb->io_32bit = 0;
426 if (parent->io_32bit) {
427 io = pci_conf_read(parent->pc, pd->tag, PCI_BRIDGE_STATIO_REG);
428 if (PCI_BRIDGE_IO_32BITS(io))
429 pb->io_32bit = 1;
430 }
431
432 pb->pmem_64bit = 0;
433 if (parent->pmem_64bit) {
434 pmem = pci_conf_read(parent->pc, pd->tag,
435 PCI_BRIDGE_PREFETCHMEM_REG);
436 if (PCI_BRIDGE_PREFETCHMEM_64BITS(pmem))
437 pb->pmem_64bit = 1;
438 }
439
440 /* Bridges only forward a 32-bit range of non-prefetchable memory. */
441 pb->mem_64bit = 0;
442
443 if (probe_bus(pb)) {
444 printf("Failed to probe bus %d\n", pb->busno);
445 goto err;
446 }
447
448 /* We have found all subordinate busses now, reprogram busreg. */
449 pb->last_busno = pb->next_busno - 1;
450 parent->next_busno = pb->next_busno;
451 set_busreg(parent->pc, pd->tag, parent->busno, pb->busno,
452 pb->last_busno);
453 if (pci_conf_debug)
454 printf("PCI bus bridge (parent %d) covers busses %d-%d\n",
455 parent->busno, pb->busno, pb->last_busno);
456
457 if (pb->io_total > 0) {
458 if (parent->niowin >= MAX_CONF_IO) {
459 printf("pciconf: too many (%d) I/O windows\n",
460 parent->niowin);
461 goto err;
462 }
463 pb->io_total |= pb->io_align - 1; /* Round up */
464 pi = get_io_desc(parent, pb->io_total);
465 pi->dev = pd;
466 pi->reg = 0;
467 pi->size = pb->io_total;
468 pi->align = pb->io_align; /* 4K min alignment */
469 if (parent->io_align < pb->io_align)
470 parent->io_align = pb->io_align;
471 pi->prefetch = 0;
472 parent->niowin++;
473 parent->io_total += pb->io_total;
474 }
475
476 if (pb->mem_total > 0) {
477 if (parent->nmemwin >= MAX_CONF_MEM) {
478 printf("pciconf: too many (%d) MEM windows\n",
479 parent->nmemwin);
480 goto err;
481 }
482 pb->mem_total |= pb->mem_align - 1; /* Round up */
483 pm = get_mem_desc(parent, pb->mem_total);
484 pm->dev = pd;
485 pm->reg = 0;
486 pm->size = pb->mem_total;
487 pm->align = pb->mem_align; /* 1M min alignment */
488 if (parent->mem_align < pb->mem_align)
489 parent->mem_align = pb->mem_align;
490 pm->prefetch = 0;
491 parent->nmemwin++;
492 parent->mem_total += pb->mem_total;
493 }
494
495 if (pb->pmem_total > 0) {
496 if (parent->nmemwin >= MAX_CONF_MEM) {
497 printf("pciconf: too many MEM windows\n");
498 goto err;
499 }
500 pb->pmem_total |= pb->pmem_align - 1; /* Round up */
501 pm = get_mem_desc(parent, pb->pmem_total);
502 pm->dev = pd;
503 pm->reg = 0;
504 pm->size = pb->pmem_total;
505 pm->align = pb->pmem_align; /* 1M alignment */
506 if (parent->pmem_align < pb->pmem_align)
507 parent->pmem_align = pb->pmem_align;
508 pm->prefetch = 1;
509 parent->nmemwin++;
510 parent->pmem_total += pb->pmem_total;
511 }
512
513 return pb;
514 err:
515 kmem_free(pb, sizeof(*pb));
516 return NULL;
517 }
518
519 static bool
520 pci_resource_is_reserved(int type, uint64_t addr, uint64_t size)
521 {
522 struct pciconf_resource_rsvd *rsvd;
523
524 LIST_FOREACH(rsvd, &pciconf_resource_reservations, next) {
525 if (rsvd->type != type)
526 continue;
527 if (rsvd->start <= addr + size && rsvd->start + rsvd->size >= addr)
528 return true;
529 }
530
531 return false;
532 }
533
534 static bool
535 pci_device_is_reserved(pciconf_bus_t *pb, pcitag_t tag)
536 {
537 pcireg_t base, base64, mask, mask64;
538 uint64_t addr, size;
539 int br, width;
540
541 /*
542 * Look to see if this device is enabled and one of the resources
543 * is already in use (firmware configured console device). If so,
544 * skip resource assignment and use firmware values.
545 */
546 width = 4;
547 for (br = PCI_MAPREG_START; br < PCI_MAPREG_END; br += width) {
548
549 base = pci_conf_read(pb->pc, tag, br);
550 pci_conf_write(pb->pc, tag, br, 0xffffffff);
551 mask = pci_conf_read(pb->pc, tag, br);
552 pci_conf_write(pb->pc, tag, br, base);
553 width = 4;
554
555 switch (PCI_MAPREG_TYPE(base)) {
556 case PCI_MAPREG_TYPE_IO:
557 addr = PCI_MAPREG_IO_ADDR(base);
558 size = PCI_MAPREG_IO_SIZE(mask);
559 if (pci_resource_is_reserved(PCI_CONF_MAP_IO, addr, size))
560 return true;
561 break;
562 case PCI_MAPREG_TYPE_MEM:
563 if (PCI_MAPREG_MEM_TYPE(base) == PCI_MAPREG_MEM_TYPE_64BIT) {
564 base64 = pci_conf_read(pb->pc, tag, br + 4);
565 pci_conf_write(pb->pc, tag, br + 4, 0xffffffff);
566 mask64 = pci_conf_read(pb->pc, tag, br + 4);
567 pci_conf_write(pb->pc, tag, br + 4, base64);
568 addr = (uint64_t)PCI_MAPREG_MEM64_ADDR(
569 (((uint64_t)base64) << 32) | base);
570 size = (uint64_t)PCI_MAPREG_MEM64_SIZE(
571 (((uint64_t)mask64) << 32) | mask);
572 width = 8;
573 } else {
574 addr = PCI_MAPREG_MEM_ADDR(base);
575 size = PCI_MAPREG_MEM_SIZE(mask);
576 }
577 if (pci_resource_is_reserved(PCI_CONF_MAP_MEM, addr, size))
578 return true;
579 break;
580 }
581 }
582
583 return false;
584 }
585
586 static int
587 pci_do_device_query(pciconf_bus_t *pb, pcitag_t tag, int dev, int func,
588 int mode)
589 {
590 pciconf_dev_t *pd;
591 pciconf_win_t *pi, *pm;
592 pcireg_t classreg, cmd, icr, bhlc, bar, mask, bar64, mask64,
593 busreg;
594 uint64_t size;
595 int br, width, reg_start, reg_end;
596
597 pd = &pb->device[pb->ndevs];
598 pd->pc = pb->pc;
599 pd->tag = tag;
600 pd->ppb = NULL;
601 pd->enable = mode;
602
603 classreg = pci_conf_read(pb->pc, tag, PCI_CLASS_REG);
604
605 cmd = pci_conf_read(pb->pc, tag, PCI_COMMAND_STATUS_REG);
606 bhlc = pci_conf_read(pb->pc, tag, PCI_BHLC_REG);
607
608 if (PCI_CLASS(classreg) != PCI_CLASS_BRIDGE
609 && PCI_HDRTYPE_TYPE(bhlc) != PCI_HDRTYPE_PPB) {
610 cmd &= ~(PCI_COMMAND_MASTER_ENABLE |
611 PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
612 pci_conf_write(pb->pc, tag, PCI_COMMAND_STATUS_REG, cmd);
613 } else if (pci_conf_debug) {
614 print_tag(pb->pc, tag);
615 printf("device is a bridge; not clearing enables\n");
616 }
617
618 if ((cmd & PCI_STATUS_BACKTOBACK_SUPPORT) == 0)
619 pb->fast_b2b = 0;
620
621 if ((cmd & PCI_STATUS_66MHZ_SUPPORT) == 0)
622 pb->freq_66 = 0;
623
624 switch (PCI_HDRTYPE_TYPE(bhlc)) {
625 case PCI_HDRTYPE_DEVICE:
626 reg_start = PCI_MAPREG_START;
627 reg_end = PCI_MAPREG_END;
628 break;
629 case PCI_HDRTYPE_PPB:
630 pd->ppb = query_bus(pb, pd, dev);
631 if (pd->ppb == NULL)
632 return -1;
633 return 0;
634 case PCI_HDRTYPE_PCB:
635 reg_start = PCI_MAPREG_START;
636 reg_end = PCI_MAPREG_PCB_END;
637
638 busreg = pci_conf_read(pb->pc, tag, PCI_BUSNUM);
639 busreg = (busreg & 0xff000000) |
640 __SHIFTIN(pb->busno, PCI_BRIDGE_BUS_PRIMARY) |
641 __SHIFTIN(pb->next_busno, PCI_BRIDGE_BUS_SECONDARY) |
642 __SHIFTIN(pb->next_busno, PCI_BRIDGE_BUS_SUBORDINATE);
643 pci_conf_write(pb->pc, tag, PCI_BUSNUM, busreg);
644
645 pb->next_busno++;
646 break;
647 default:
648 return -1;
649 }
650
651 icr = pci_conf_read(pb->pc, tag, PCI_INTERRUPT_REG);
652 pd->ipin = PCI_INTERRUPT_PIN(icr);
653 pd->iline = PCI_INTERRUPT_LINE(icr);
654 pd->min_gnt = PCI_MIN_GNT(icr);
655 pd->max_lat = PCI_MAX_LAT(icr);
656 if (pd->iline || pd->ipin) {
657 pci_conf_interrupt(pb->pc, pb->busno, dev, pd->ipin, pb->swiz,
658 &pd->iline);
659 icr &= ~(PCI_INTERRUPT_LINE_MASK << PCI_INTERRUPT_LINE_SHIFT);
660 icr |= (pd->iline << PCI_INTERRUPT_LINE_SHIFT);
661 pci_conf_write(pb->pc, tag, PCI_INTERRUPT_REG, icr);
662 }
663
664 if (pd->min_gnt != 0 || pd->max_lat != 0) {
665 if (pd->min_gnt != 0 && pd->min_gnt > pb->max_mingnt)
666 pb->max_mingnt = pd->min_gnt;
667
668 if (pd->max_lat != 0 && pd->max_lat < pb->min_maxlat)
669 pb->min_maxlat = pd->max_lat;
670
671 pb->bandwidth_used += pd->min_gnt * 4000000 /
672 (pd->min_gnt + pd->max_lat);
673 }
674
675 if (PCI_HDRTYPE_TYPE(bhlc) == PCI_HDRTYPE_DEVICE &&
676 pci_device_is_reserved(pb, tag)) {
677 /*
678 * Device already configured by firmware.
679 */
680 return 0;
681 }
682
683 width = 4;
684 for (br = reg_start; br < reg_end; br += width) {
685 #if 0
686 /* XXX Should only ignore if IDE not in legacy mode? */
687 if (PCI_CLASS(classreg) == PCI_CLASS_MASS_STORAGE &&
688 PCI_SUBCLASS(classreg) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
689 break;
690 }
691 #endif
692 bar = pci_conf_read(pb->pc, tag, br);
693 pci_conf_write(pb->pc, tag, br, 0xffffffff);
694 mask = pci_conf_read(pb->pc, tag, br);
695 pci_conf_write(pb->pc, tag, br, bar);
696 width = 4;
697
698 if ( (mode & PCI_CONF_MAP_IO)
699 && (PCI_MAPREG_TYPE(mask) == PCI_MAPREG_TYPE_IO)) {
700 /*
701 * Upper 16 bits must be one. Devices may hardwire
702 * them to zero, though, per PCI 2.2, 6.2.5.1, p 203.
703 */
704 mask |= 0xffff0000;
705
706 size = PCI_MAPREG_IO_SIZE(mask);
707 if (size == 0) {
708 if (pci_conf_debug) {
709 print_tag(pb->pc, tag);
710 printf("I/O BAR 0x%x is void\n", br);
711 }
712 continue;
713 }
714
715 if (pb->niowin >= MAX_CONF_IO) {
716 printf("pciconf: too many I/O windows\n");
717 return -1;
718 }
719
720 pi = get_io_desc(pb, size);
721 pi->dev = pd;
722 pi->reg = br;
723 pi->size = (uint64_t)size;
724 pi->align = 4;
725 if (pb->io_align < pi->size)
726 pb->io_align = pi->size;
727 pi->prefetch = 0;
728 if (pci_conf_debug) {
729 print_tag(pb->pc, tag);
730 printf("Register 0x%x, I/O size %" PRIu64 "\n",
731 br, pi->size);
732 }
733 pb->niowin++;
734 pb->io_total += size;
735 } else if ((mode & PCI_CONF_MAP_MEM)
736 && (PCI_MAPREG_TYPE(mask) == PCI_MAPREG_TYPE_MEM)) {
737 switch (PCI_MAPREG_MEM_TYPE(mask)) {
738 case PCI_MAPREG_MEM_TYPE_32BIT:
739 case PCI_MAPREG_MEM_TYPE_32BIT_1M:
740 size = (uint64_t)PCI_MAPREG_MEM_SIZE(mask);
741 break;
742 case PCI_MAPREG_MEM_TYPE_64BIT:
743 bar64 = pci_conf_read(pb->pc, tag, br + 4);
744 pci_conf_write(pb->pc, tag, br + 4, 0xffffffff);
745 mask64 = pci_conf_read(pb->pc, tag, br + 4);
746 pci_conf_write(pb->pc, tag, br + 4, bar64);
747 size = (uint64_t)PCI_MAPREG_MEM64_SIZE(
748 (((uint64_t)mask64) << 32) | mask);
749 width = 8;
750 break;
751 default:
752 print_tag(pb->pc, tag);
753 printf("reserved mapping type 0x%x\n",
754 PCI_MAPREG_MEM_TYPE(mask));
755 continue;
756 }
757
758 if (size == 0) {
759 if (pci_conf_debug) {
760 print_tag(pb->pc, tag);
761 printf("MEM%d BAR 0x%x is void\n",
762 PCI_MAPREG_MEM_TYPE(mask) ==
763 PCI_MAPREG_MEM_TYPE_64BIT ?
764 64 : 32, br);
765 }
766 continue;
767 } else {
768 if (pci_conf_debug) {
769 print_tag(pb->pc, tag);
770 printf("MEM%d BAR 0x%x has size %#lx\n",
771 PCI_MAPREG_MEM_TYPE(mask) ==
772 PCI_MAPREG_MEM_TYPE_64BIT ?
773 64 : 32,
774 br, (unsigned long)size);
775 }
776 }
777
778 if (pb->nmemwin >= MAX_CONF_MEM) {
779 printf("pciconf: too many memory windows\n");
780 return -1;
781 }
782
783 pm = get_mem_desc(pb, size);
784 pm->dev = pd;
785 pm->reg = br;
786 pm->size = size;
787 pm->align = 4;
788 pm->prefetch = PCI_MAPREG_MEM_PREFETCHABLE(mask);
789 if (pci_conf_debug) {
790 print_tag(pb->pc, tag);
791 printf("Register 0x%x, memory size %"
792 PRIu64 "\n", br, pm->size);
793 }
794 pb->nmemwin++;
795 if (pm->prefetch) {
796 pb->pmem_total += size;
797 if (pb->pmem_align < pm->size)
798 pb->pmem_align = pm->size;
799 } else {
800 pb->mem_total += size;
801 if (pb->mem_align < pm->size)
802 pb->mem_align = pm->size;
803 }
804 }
805 }
806
807 if (mode & PCI_CONF_MAP_ROM) {
808 bar = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM);
809 pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM, 0xfffffffe);
810 mask = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM);
811 pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM, bar);
812
813 if (mask != 0 && mask != 0xffffffff) {
814 if (pb->nmemwin >= MAX_CONF_MEM) {
815 printf("pciconf: too many memory windows\n");
816 return -1;
817 }
818 size = (uint64_t)PCI_MAPREG_MEM_SIZE(mask);
819
820 pm = get_mem_desc(pb, size);
821 pm->dev = pd;
822 pm->reg = PCI_MAPREG_ROM;
823 pm->size = size;
824 pm->align = 4;
825 pm->prefetch = 0;
826 if (pci_conf_debug) {
827 print_tag(pb->pc, tag);
828 printf("Expansion ROM memory size %"
829 PRIu64 "\n", pm->size);
830 }
831 pb->nmemwin++;
832 if (pm->prefetch) {
833 pb->pmem_total += size;
834 if (pb->pmem_align < pm->size)
835 pb->pmem_align = pm->size;
836 } else {
837 pb->mem_total += size;
838 if (pb->mem_align < pm->size)
839 pb->mem_align = pm->size;
840 }
841 }
842 } else {
843 /* Don't enable ROMs if we aren't going to map them. */
844 mode &= ~PCI_CONF_ENABLE_ROM;
845 pd->enable &= ~PCI_CONF_ENABLE_ROM;
846 }
847
848 if (!(mode & PCI_CONF_ENABLE_ROM)) {
849 /* Ensure ROM is disabled */
850 bar = pci_conf_read(pb->pc, tag, PCI_MAPREG_ROM);
851 pci_conf_write(pb->pc, tag, PCI_MAPREG_ROM,
852 bar & ~PCI_MAPREG_ROM_ENABLE);
853 }
854
855 return 0;
856 }
857
858 /************************************************************************/
859 /************************************************************************/
860 /******************** Bus configuration routines ********************/
861 /************************************************************************/
862 /************************************************************************/
863 static uint64_t
864 pci_allocate_range(struct pciconf_resource * const r, const uint64_t amt,
865 const int align, const bool ok64 __used_only_lp64)
866 {
867 vmem_size_t const size = (vmem_size_t) amt;
868 vmem_addr_t result;
869 int error;
870
871 #ifdef _LP64
872 /*
873 * If a 64-bit range IS OK, then we prefer allocating above 4GB.
874 *
875 * XXX We guard this with _LP64 because vmem uses uintptr_t
876 * internally.
877 */
878 if (!ok64) {
879 error = vmem_xalloc(r->arena, size, align, 0, 0,
880 VMEM_ADDR_MIN, 0xffffffffUL,
881 VM_BESTFIT | VM_NOSLEEP,
882 &result);
883 } else {
884 error = vmem_xalloc(r->arena, size, align, 0, 0,
885 (1UL << 32), VMEM_ADDR_MAX,
886 VM_BESTFIT | VM_NOSLEEP,
887 &result);
888 if (error) {
889 error = vmem_xalloc(r->arena, size, align, 0, 0,
890 VMEM_ADDR_MIN, VMEM_ADDR_MAX,
891 VM_BESTFIT | VM_NOSLEEP,
892 &result);
893 }
894 }
895 #else
896 error = vmem_xalloc(r->arena, size, align, 0, 0,
897 VMEM_ADDR_MIN, 0xffffffffUL,
898 VM_BESTFIT | VM_NOSLEEP,
899 &result);
900 #endif /* _LP64 */
901
902 if (error)
903 return ~0ULL;
904
905 return result;
906 }
907
908 static int
909 setup_iowins(pciconf_bus_t *pb)
910 {
911 pciconf_win_t *pi;
912 pciconf_dev_t *pd;
913 int error;
914
915 for (pi = pb->pciiowin; pi < &pb->pciiowin[pb->niowin]; pi++) {
916 if (pi->size == 0)
917 continue;
918
919 pd = pi->dev;
920 if (pb->io_res.arena == NULL) {
921 /* Bus has no IO ranges, disable IO BAR */
922 pi->address = 0;
923 pd->enable &= ~PCI_CONF_ENABLE_IO;
924 goto write_ioaddr;
925 }
926 pi->address = pci_allocate_range(&pb->io_res, pi->size,
927 pi->align, false);
928 if (~pi->address == 0) {
929 print_tag(pd->pc, pd->tag);
930 printf("Failed to allocate PCI I/O space (%"
931 PRIu64 " req)\n", pi->size);
932 return -1;
933 }
934 if (pd->ppb && pi->reg == 0) {
935 error = init_range_resource(&pd->ppb->io_res,
936 "ppb-io", pi->address, pi->size);
937 if (error) {
938 print_tag(pd->pc, pd->tag);
939 printf("Failed to alloc I/O arena for bus %d\n",
940 pd->ppb->busno);
941 return -1;
942 }
943 continue;
944 }
945 if (!pb->io_32bit && pi->address > 0xFFFF) {
946 pi->address = 0;
947 pd->enable &= ~PCI_CONF_ENABLE_IO;
948 } else {
949 pd->enable |= PCI_CONF_ENABLE_IO;
950 }
951 write_ioaddr:
952 if (pci_conf_debug) {
953 print_tag(pd->pc, pd->tag);
954 printf("Putting %" PRIu64 " I/O bytes @ %#" PRIx64
955 " (reg %x)\n", pi->size, pi->address, pi->reg);
956 }
957 pci_conf_write(pd->pc, pd->tag, pi->reg,
958 PCI_MAPREG_IO_ADDR(pi->address) | PCI_MAPREG_TYPE_IO);
959 }
960 return 0;
961 }
962
963 static int
964 setup_memwins(pciconf_bus_t *pb)
965 {
966 pciconf_win_t *pm;
967 pciconf_dev_t *pd;
968 pcireg_t base;
969 struct pciconf_resource *r;
970 bool ok64;
971 int error;
972
973 for (pm = pb->pcimemwin; pm < &pb->pcimemwin[pb->nmemwin]; pm++) {
974 if (pm->size == 0)
975 continue;
976
977 ok64 = false;
978 pd = pm->dev;
979 if (pm->prefetch) {
980 r = &pb->pmem_res;
981 ok64 = pb->pmem_64bit;
982 } else {
983 r = &pb->mem_res;
984 ok64 = pb->mem_64bit && pd->ppb == NULL;
985 }
986
987 /*
988 * We need to figure out if the memory BAR is 64-bit
989 * capable or not. If it's not, then we need to constrain
990 * the address allocation.
991 */
992 if (pm->reg == PCI_MAPREG_ROM) {
993 ok64 = false;
994 } else if (ok64) {
995 base = pci_conf_read(pd->pc, pd->tag, pm->reg);
996 ok64 = PCI_MAPREG_MEM_TYPE(base) ==
997 PCI_MAPREG_MEM_TYPE_64BIT;
998 }
999
1000 pm->address = pci_allocate_range(r, pm->size, pm->align,
1001 ok64);
1002 if (~pm->address == 0) {
1003 print_tag(pd->pc, pd->tag);
1004 printf(
1005 "Failed to allocate PCI memory space (%" PRIu64
1006 " req, prefetch=%d ok64=%d)\n", pm->size,
1007 pm->prefetch, (int)ok64);
1008 return -1;
1009 }
1010 if (pd->ppb && pm->reg == 0) {
1011 const char *name = pm->prefetch ? "ppb-pmem"
1012 : "ppb-mem";
1013 r = pm->prefetch ? &pd->ppb->pmem_res
1014 : &pd->ppb->mem_res;
1015 error = init_range_resource(r, name,
1016 pm->address, pm->size);
1017 if (error) {
1018 print_tag(pd->pc, pd->tag);
1019 printf("Failed to alloc MEM arena for bus %d\n",
1020 pd->ppb->busno);
1021 return -1;
1022 }
1023 continue;
1024 }
1025 if (!ok64 && pm->address > 0xFFFFFFFFULL) {
1026 pm->address = 0;
1027 pd->enable &= ~PCI_CONF_ENABLE_MEM;
1028 } else
1029 pd->enable |= PCI_CONF_ENABLE_MEM;
1030
1031 if (pm->reg != PCI_MAPREG_ROM) {
1032 if (pci_conf_debug) {
1033 print_tag(pd->pc, pd->tag);
1034 printf(
1035 "Putting %" PRIu64 " MEM bytes @ %#"
1036 PRIx64 " (reg %x)\n", pm->size,
1037 pm->address, pm->reg);
1038 }
1039 base = pci_conf_read(pd->pc, pd->tag, pm->reg);
1040 base = PCI_MAPREG_MEM_ADDR(pm->address) |
1041 PCI_MAPREG_MEM_TYPE(base);
1042 pci_conf_write(pd->pc, pd->tag, pm->reg, base);
1043 if (PCI_MAPREG_MEM_TYPE(base) ==
1044 PCI_MAPREG_MEM_TYPE_64BIT) {
1045 base = (pcireg_t)
1046 (PCI_MAPREG_MEM64_ADDR(pm->address) >> 32);
1047 pci_conf_write(pd->pc, pd->tag, pm->reg + 4,
1048 base);
1049 }
1050 }
1051 }
1052 for (pm = pb->pcimemwin; pm < &pb->pcimemwin[pb->nmemwin]; pm++) {
1053 if (pm->reg == PCI_MAPREG_ROM && pm->address != -1) {
1054 pd = pm->dev;
1055 if (!(pd->enable & PCI_CONF_MAP_ROM))
1056 continue;
1057 if (pci_conf_debug) {
1058 print_tag(pd->pc, pd->tag);
1059 printf(
1060 "Putting %" PRIu64 " ROM bytes @ %#"
1061 PRIx64 " (reg %x)\n", pm->size,
1062 pm->address, pm->reg);
1063 }
1064 base = (pcireg_t) pm->address;
1065 if (pd->enable & PCI_CONF_ENABLE_ROM)
1066 base |= PCI_MAPREG_ROM_ENABLE;
1067
1068 pci_conf_write(pd->pc, pd->tag, pm->reg, base);
1069 }
1070 }
1071 return 0;
1072 }
1073
1074 static bool
1075 constrain_bridge_mem_range(struct pciconf_resource * const r,
1076 u_long * const base,
1077 u_long * const limit,
1078 const bool ok64 __used_only_lp64)
1079 {
1080
1081 *base = r->min_addr;
1082 *limit = r->max_addr;
1083
1084 #ifdef _LP64
1085 if (!ok64) {
1086 if (r->min_addr >= (1UL << 32)) {
1087 return true;
1088 }
1089 if (r->max_addr > 0xffffffffUL) {
1090 *limit = 0xffffffffUL;
1091 }
1092 }
1093 #endif /* _LP64 */
1094
1095 return false;
1096 }
1097
1098 /*
1099 * Configure I/O, memory, and prefetchable memory spaces, then make
1100 * a call to configure_bus().
1101 */
1102 static int
1103 configure_bridge(pciconf_dev_t *pd)
1104 {
1105 unsigned long io_base, io_limit, mem_base, mem_limit;
1106 pciconf_bus_t *pb;
1107 pcireg_t io, iohigh, mem, cmd;
1108 int rv;
1109 bool isprefetchmem64;
1110 bool bad_range;
1111
1112 pb = pd->ppb;
1113 /* Configure I/O base & limit */
1114 if (pb->io_res.arena) {
1115 io_base = pb->io_res.min_addr;
1116 io_limit = pb->io_res.max_addr;
1117 } else {
1118 io_base = 0x1000; /* 4K */
1119 io_limit = 0x0000;
1120 }
1121 if (pb->io_32bit) {
1122 iohigh = __SHIFTIN(io_base >> 16, PCI_BRIDGE_IOHIGH_BASE) |
1123 __SHIFTIN(io_limit >> 16, PCI_BRIDGE_IOHIGH_LIMIT);
1124 } else {
1125 if (io_limit > 0xFFFF) {
1126 printf("Bus %d bridge does not support 32-bit I/O. ",
1127 pb->busno);
1128 printf("Disabling I/O accesses\n");
1129 io_base = 0x1000; /* 4K */
1130 io_limit = 0x0000;
1131 }
1132 iohigh = 0;
1133 }
1134 io = pci_conf_read(pb->pc, pd->tag, PCI_BRIDGE_STATIO_REG) &
1135 PCI_BRIDGE_STATIO_STATUS;
1136 io |= __SHIFTIN((io_base >> 8) & PCI_BRIDGE_STATIO_IOADDR,
1137 PCI_BRIDGE_STATIO_IOBASE);
1138 io |= __SHIFTIN((io_limit >> 8) & PCI_BRIDGE_STATIO_IOADDR,
1139 PCI_BRIDGE_STATIO_IOLIMIT);
1140 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_STATIO_REG, io);
1141 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_IOHIGH_REG, iohigh);
1142
1143 /* Configure mem base & limit */
1144 bad_range = false;
1145 if (pb->mem_res.arena) {
1146 bad_range = constrain_bridge_mem_range(&pb->mem_res,
1147 &mem_base,
1148 &mem_limit,
1149 false);
1150 } else {
1151 mem_base = 0x100000; /* 1M */
1152 mem_limit = 0x000000;
1153 }
1154 if (bad_range) {
1155 printf("Bus %d bridge MEM range out of range. ", pb->busno);
1156 printf("Disabling MEM accesses\n");
1157 mem_base = 0x100000; /* 1M */
1158 mem_limit = 0x000000;
1159 }
1160 mem = __SHIFTIN((mem_base >> 16) & PCI_BRIDGE_MEMORY_ADDR,
1161 PCI_BRIDGE_MEMORY_BASE);
1162 mem |= __SHIFTIN((mem_limit >> 16) & PCI_BRIDGE_MEMORY_ADDR,
1163 PCI_BRIDGE_MEMORY_LIMIT);
1164 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_MEMORY_REG, mem);
1165
1166 /* Configure prefetchable mem base & limit */
1167 mem = pci_conf_read(pb->pc, pd->tag, PCI_BRIDGE_PREFETCHMEM_REG);
1168 isprefetchmem64 = PCI_BRIDGE_PREFETCHMEM_64BITS(mem);
1169 bad_range = false;
1170 if (pb->pmem_res.arena) {
1171 bad_range = constrain_bridge_mem_range(&pb->pmem_res,
1172 &mem_base,
1173 &mem_limit,
1174 isprefetchmem64);
1175 } else {
1176 mem_base = 0x100000; /* 1M */
1177 mem_limit = 0x000000;
1178 }
1179 if (bad_range) {
1180 printf("Bus %d bridge does not support 64-bit PMEM. ",
1181 pb->busno);
1182 printf("Disabling prefetchable-MEM accesses\n");
1183 mem_base = 0x100000; /* 1M */
1184 mem_limit = 0x000000;
1185 }
1186 mem = __SHIFTIN((mem_base >> 16) & PCI_BRIDGE_PREFETCHMEM_ADDR,
1187 PCI_BRIDGE_PREFETCHMEM_BASE);
1188 mem |= __SHIFTIN((mem_limit >> 16) & PCI_BRIDGE_PREFETCHMEM_ADDR,
1189 PCI_BRIDGE_PREFETCHMEM_LIMIT);
1190 pci_conf_write(pb->pc, pd->tag, PCI_BRIDGE_PREFETCHMEM_REG, mem);
1191 /*
1192 * XXX -- 64-bit systems need a lot more than just this...
1193 */
1194 if (isprefetchmem64) {
1195 mem_base = (uint64_t)mem_base >> 32;
1196 mem_limit = (uint64_t)mem_limit >> 32;
1197 pci_conf_write(pb->pc, pd->tag,
1198 PCI_BRIDGE_PREFETCHBASEUP32_REG, mem_base & 0xffffffff);
1199 pci_conf_write(pb->pc, pd->tag,
1200 PCI_BRIDGE_PREFETCHLIMITUP32_REG, mem_limit & 0xffffffff);
1201 }
1202
1203 rv = configure_bus(pb);
1204
1205 fini_range_resource(&pb->io_res);
1206 fini_range_resource(&pb->mem_res);
1207 fini_range_resource(&pb->pmem_res);
1208
1209 if (rv == 0) {
1210 cmd = pci_conf_read(pd->pc, pd->tag, PCI_BRIDGE_CONTROL_REG);
1211 cmd &= ~PCI_BRIDGE_CONTROL; /* Clear control bit first */
1212 cmd |= PCI_BRIDGE_CONTROL_PERE | PCI_BRIDGE_CONTROL_SERR;
1213 if (pb->fast_b2b)
1214 cmd |= PCI_BRIDGE_CONTROL_SECFASTB2B;
1215
1216 pci_conf_write(pd->pc, pd->tag, PCI_BRIDGE_CONTROL_REG, cmd);
1217 cmd = pci_conf_read(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG);
1218 cmd |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
1219 pci_conf_write(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG, cmd);
1220 }
1221
1222 return rv;
1223 }
1224
1225 /*
1226 * Calculate latency values, allocate I/O and MEM segments, then set them
1227 * up. If a PCI-PCI bridge is found, configure the bridge separately,
1228 * which will cause a recursive call back here.
1229 */
1230 static int
1231 configure_bus(pciconf_bus_t *pb)
1232 {
1233 pciconf_dev_t *pd;
1234 int def_ltim, max_ltim, band, bus_mhz;
1235
1236 if (pb->ndevs == 0) {
1237 if (pci_conf_debug)
1238 printf("PCI bus %d - no devices\n", pb->busno);
1239 return 1;
1240 }
1241 bus_mhz = pb->freq_66 ? 66 : 33;
1242 max_ltim = pb->max_mingnt * bus_mhz / 4; /* cvt to cycle count */
1243 band = 4000000; /* 0.25us cycles/sec */
1244 if (band < pb->bandwidth_used) {
1245 printf("PCI bus %d: Warning: Total bandwidth exceeded!? (%d)\n",
1246 pb->busno, pb->bandwidth_used);
1247 def_ltim = -1;
1248 } else {
1249 def_ltim = (band - pb->bandwidth_used) / pb->ndevs;
1250 if (def_ltim > pb->min_maxlat)
1251 def_ltim = pb->min_maxlat;
1252 def_ltim = def_ltim * bus_mhz / 4;
1253 }
1254 def_ltim = (def_ltim + 7) & ~7;
1255 max_ltim = (max_ltim + 7) & ~7;
1256
1257 pb->def_ltim = MIN(def_ltim, 255);
1258 pb->max_ltim = MIN(MAX(max_ltim, def_ltim), 255);
1259
1260 /*
1261 * Now we have what we need to initialize the devices.
1262 * It would probably be better if we could allocate all of these
1263 * for all busses at once, but "not right now". First, get a list
1264 * of free memory ranges from the m.d. system.
1265 */
1266 if (setup_iowins(pb) || setup_memwins(pb)) {
1267 printf("PCI bus configuration failed: "
1268 "unable to assign all I/O and memory ranges.\n");
1269 return -1;
1270 }
1271
1272 /*
1273 * Configure the latency for the devices, and enable them.
1274 */
1275 for (pd = pb->device; pd < &pb->device[pb->ndevs]; pd++) {
1276 pcireg_t cmd, classreg, misc;
1277 int ltim;
1278
1279 if (pci_conf_debug) {
1280 print_tag(pd->pc, pd->tag);
1281 printf("Configuring device.\n");
1282 }
1283 classreg = pci_conf_read(pd->pc, pd->tag, PCI_CLASS_REG);
1284 misc = pci_conf_read(pd->pc, pd->tag, PCI_BHLC_REG);
1285 cmd = pci_conf_read(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG);
1286 if (pd->enable & PCI_CONF_ENABLE_PARITY)
1287 cmd |= PCI_COMMAND_PARITY_ENABLE;
1288 if (pd->enable & PCI_CONF_ENABLE_SERR)
1289 cmd |= PCI_COMMAND_SERR_ENABLE;
1290 if (pb->fast_b2b)
1291 cmd |= PCI_COMMAND_BACKTOBACK_ENABLE;
1292 if (PCI_CLASS(classreg) != PCI_CLASS_BRIDGE ||
1293 PCI_SUBCLASS(classreg) != PCI_SUBCLASS_BRIDGE_PCI) {
1294 if (pd->enable & PCI_CONF_ENABLE_IO)
1295 cmd |= PCI_COMMAND_IO_ENABLE;
1296 if (pd->enable & PCI_CONF_ENABLE_MEM)
1297 cmd |= PCI_COMMAND_MEM_ENABLE;
1298 if (pd->enable & PCI_CONF_ENABLE_BM)
1299 cmd |= PCI_COMMAND_MASTER_ENABLE;
1300 ltim = pd->min_gnt * bus_mhz / 4;
1301 ltim = MIN (MAX (pb->def_ltim, ltim), pb->max_ltim);
1302 } else {
1303 cmd |= PCI_COMMAND_MASTER_ENABLE;
1304 ltim = MIN (pb->def_ltim, pb->max_ltim);
1305 }
1306 if ((pd->enable &
1307 (PCI_CONF_ENABLE_MEM | PCI_CONF_ENABLE_IO)) == 0) {
1308 print_tag(pd->pc, pd->tag);
1309 printf("Disabled due to lack of resources.\n");
1310 cmd &= ~(PCI_COMMAND_MASTER_ENABLE |
1311 PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE);
1312 }
1313 pci_conf_write(pd->pc, pd->tag, PCI_COMMAND_STATUS_REG, cmd);
1314
1315 misc &= ~((PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT) |
1316 (PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT));
1317 misc |= (ltim & PCI_LATTIMER_MASK) << PCI_LATTIMER_SHIFT;
1318 misc |= ((pb->cacheline_size >> 2) & PCI_CACHELINE_MASK) <<
1319 PCI_CACHELINE_SHIFT;
1320 pci_conf_write(pd->pc, pd->tag, PCI_BHLC_REG, misc);
1321
1322 if (pd->ppb) {
1323 if (configure_bridge(pd) < 0)
1324 return -1;
1325 continue;
1326 }
1327 }
1328
1329 if (pci_conf_debug)
1330 printf("PCI bus %d configured\n", pb->busno);
1331
1332 return 0;
1333 }
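/*
 * A worked example of the latency arithmetic above, using assumed
 * numbers rather than values from any particular machine: on a 33 MHz
 * bus with 3 devices, bandwidth_used = 1000000 and min_maxlat = 32,
 * the fair share is (4000000 - 1000000) / 3 = 1000000, clamped down to
 * min_maxlat = 32, scaled to 32 * 33 / 4 = 264 bus cycles, rounded up
 * to a multiple of 8 (still 264), and finally capped at 255 when
 * stored in pb->def_ltim.
 */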
1334
1335 static bool
1336 mem_region_ok64(struct pciconf_resource * const r __used_only_lp64)
1337 {
1338 bool rv = false;
1339
1340 #ifdef _LP64
1341 /*
1342 * XXX We need to guard this with _LP64 because vmem uses
1343 * uintptr_t internally.
1344 */
1345 vmem_size_t result;
1346 if (vmem_xalloc(r->arena, 1/*size*/, 1/*align*/, 0/*phase*/,
1347 0/*nocross*/, (1UL << 32), VMEM_ADDR_MAX,
1348 VM_INSTANTFIT | VM_NOSLEEP, &result) == 0) {
1349 vmem_free(r->arena, result, 1);
1350 rv = true;
1351 }
1352 #endif /* _LP64 */
1353
1354 return rv;
1355 }
1356
1357 /*
1358 * pciconf_resource_init:
1359 *
1360 * Allocate and initialize a pci configuration resources container.
1361 */
1362 struct pciconf_resources *
1363 pciconf_resource_init(void)
1364 {
1365 struct pciconf_resources *rs;
1366
1367 rs = kmem_zalloc(sizeof(*rs), KM_SLEEP);
1368
1369 return (rs);
1370 }
1371
1372 /*
1373 * pciconf_resource_fini:
1374 *
1375 * Dispose of a pci configuration resources container.
1376 */
1377 void
1378 pciconf_resource_fini(struct pciconf_resources *rs)
1379 {
1380 int i;
1381
1382 for (i = 0; i < PCICONF_RESOURCE_NTYPES; i++) {
1383 fini_range_resource(&rs->resources[i]);
1384 }
1385
1386 kmem_free(rs, sizeof(*rs));
1387 }
1388
1389 /*
1390 * pciconf_resource_add:
1391 *
1392 * Add a pci configuration resource to a container.
1393 */
1394 int
1395 pciconf_resource_add(struct pciconf_resources *rs, int type,
1396 bus_addr_t start, bus_size_t size)
1397 {
1398 bus_addr_t end = start + (size - 1);
1399 struct pciconf_resource *r;
1400 struct pciconf_resource_rsvd *rsvd;
1401 int error, rsvd_type, align;
1402 vmem_addr_t result;
1403 bool first;
1404
1405 if (size == 0 || end <= start)
1406 return EINVAL;
1407
1408 if (type < 0 || type >= PCICONF_RESOURCE_NTYPES)
1409 return EINVAL;
1410
1411 r = &rs->resources[type];
1412
1413 first = r->arena == NULL;
1414 if (first) {
1415 r->arena = create_vmem_arena(pciconf_resource_names[type],
1416 0, 0, VM_SLEEP);
1417 r->min_addr = VMEM_ADDR_MAX;
1418 r->max_addr = VMEM_ADDR_MIN;
1419 }
1420
1421 error = vmem_add(r->arena, start, size, VM_SLEEP);
1422 if (error == 0) {
1423 if (start < r->min_addr)
1424 r->min_addr = start;
1425 if (end > r->max_addr)
1426 r->max_addr = end;
1427 }
1428
1429 r->total_size += size;
1430
1431 switch (type) {
1432 case PCICONF_RESOURCE_IO:
1433 rsvd_type = PCI_CONF_MAP_IO;
1434 align = 0x1000;
1435 break;
1436 case PCICONF_RESOURCE_MEM:
1437 case PCICONF_RESOURCE_PREFETCHABLE_MEM:
1438 rsvd_type = PCI_CONF_MAP_MEM;
1439 align = 0x100000;
1440 break;
1441 default:
1442 rsvd_type = 0;
1443 align = 0;
1444 break;
1445 }
1446
1447 /*
1448 * Exclude reserved ranges from available resources
1449 */
1450 LIST_FOREACH(rsvd, &pciconf_resource_reservations, next) {
1451 if (rsvd->type != rsvd_type)
1452 continue;
1453 /*
1454 * The reserved range may not be within our resource window.
1455 * That's fine, so ignore the error.
1456 */
1457 (void)vmem_xalloc(r->arena, rsvd->size, align, 0, 0,
1458 rsvd->start, rsvd->start + rsvd->size,
1459 VM_BESTFIT | VM_NOSLEEP,
1460 &result);
1461 }
1462
1463 return 0;
1464 }
1465
1466 /*
1467 * pciconf_resource_reserve:
1468 *
1469 * Mark a pci configuration resource as in-use. Devices
1470 * already configured to use these resources are skipped
1471 * during resource assignment.
1472 */
1473 void
1474 pciconf_resource_reserve(int type, bus_addr_t start, bus_size_t size)
1475 {
1476 struct pciconf_resource_rsvd *rsvd;
1477
1478 rsvd = kmem_zalloc(sizeof(*rsvd), KM_SLEEP);
1479 rsvd->type = type;
1480 rsvd->start = start;
1481 rsvd->size = size;
1482 LIST_INSERT_HEAD(&pciconf_resource_reservations, rsvd, next);
1483 }
1484
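/*
 * A hypothetical sketch of how machine-dependent code might use
 * pciconf_resource_reserve() to keep a firmware-programmed device --
 * e.g. a console framebuffer -- at the addresses the firmware chose;
 * the ranges below are placeholders, not values from the original
 * source:
 *
 *	pciconf_resource_reserve(PCI_CONF_MAP_MEM, 0x90000000, 0x01000000);
 *	pciconf_resource_reserve(PCI_CONF_MAP_IO,  0x00003000, 0x00000100);
 *
 * Any BAR that overlaps a reserved range makes pci_device_is_reserved()
 * return true, and pci_do_device_query() then leaves that device's
 * firmware-assigned resources untouched.
 */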
1485 /*
1486 * Let's configure the PCI bus.
1487 * This consists of basically scanning for all existing devices,
1488 * identifying their needs, and then making another pass over them
1489 * to set:
1490 * 1. I/O addresses
1491 * 2. Memory addresses (Prefetchable and not)
1492 * 3. PCI command register
1493 * 4. The latency part of the PCI BHLC (BIST (Built-In Self Test),
1494 * Header type, Latency timer, Cache line size) register
1495 *
1496 * The command register is set to enable fast back-to-back transactions
1497 * if the host bridge says it can handle it. We also configure
1498 * Master Enable, SERR enable, parity enable, and (if this is not a
1499 * PCI-PCI bridge) the I/O and Memory spaces. Apparently some devices
1500 * will not report some I/O space.
1501 *
1502 * The latency is computed to be a "fair share" of the bus bandwidth.
1503 * The bus bandwidth variable is initialized to the number of PCI cycles
1504 * in one second. The number of cycles taken for one transaction by each
1505 * device (MAX_LAT + MIN_GNT) is then subtracted from the bandwidth.
1506 * Care is taken to ensure that the latency timer won't be set such that
1507 * it would exceed the critical time for any device.
1508 *
1509 * This is complicated somewhat due to the presence of bridges. PCI-PCI
1510 * bridges are probed and configured recursively.
1511 */
1512 int
1513 pci_configure_bus(pci_chipset_tag_t pc, struct pciconf_resources *rs,
1514 int firstbus, int cacheline_size)
1515 {
1516 pciconf_bus_t *pb;
1517 int rv;
1518
1519 pb = kmem_zalloc(sizeof (pciconf_bus_t), KM_SLEEP);
1520 pb->busno = firstbus;
1521 pb->next_busno = pb->busno + 1;
1522 pb->last_busno = 255;
1523 pb->cacheline_size = cacheline_size;
1524 pb->parent_bus = NULL;
1525 pb->swiz = 0;
1526 pb->io_32bit = 1;
1527 pb->io_res = rs->resources[PCICONF_RESOURCE_IO];
1528
1529 pb->mem_res = rs->resources[PCICONF_RESOURCE_MEM];
1530 if (pb->mem_res.arena == NULL)
1531 pb->mem_res = rs->resources[PCICONF_RESOURCE_PREFETCHABLE_MEM];
1532
1533 pb->pmem_res = rs->resources[PCICONF_RESOURCE_PREFETCHABLE_MEM];
1534 if (pb->pmem_res.arena == NULL)
1535 pb->pmem_res = rs->resources[PCICONF_RESOURCE_MEM];
1536
1537 /*
1538 * Probe the memory region arenas to see if allocation of
1539 * 64-bit addresses is possible.
1540 */
1541 pb->mem_64bit = mem_region_ok64(&pb->mem_res);
1542 pb->pmem_64bit = mem_region_ok64(&pb->pmem_res);
1543
1544 pb->pc = pc;
1545 pb->io_total = pb->mem_total = pb->pmem_total = 0;
1546
1547 rv = probe_bus(pb);
1548 pb->last_busno = pb->next_busno - 1;
1549 if (rv == 0)
1550 rv = configure_bus(pb);
1551
1552 /*
1553 * All done!
1554 */
1555 kmem_free(pb, sizeof(*pb));
1556 return rv;
1557 }
1558
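/*
 * A usage sketch, compiled out below, of how a hypothetical
 * machine-dependent attachment might feed free address ranges to this
 * file and then configure the bus behind a host bridge.  The function
 * name, address ranges and cache line size are placeholders, not
 * values taken from the original source.
 */
#if 0
static void
example_md_configure(pci_chipset_tag_t pc)
{
	struct pciconf_resources *rs;

	rs = pciconf_resource_init();

	/* Free PCI I/O and memory space decoded by the host bridge. */
	pciconf_resource_add(rs, PCICONF_RESOURCE_IO,
	    0x00001000, 0x0000f000);
	pciconf_resource_add(rs, PCICONF_RESOURCE_MEM,
	    0x80000000, 0x40000000);
	pciconf_resource_add(rs, PCICONF_RESOURCE_PREFETCHABLE_MEM,
	    0xc0000000, 0x20000000);

	/* Assign addresses starting at bus 0 with 64-byte cache lines. */
	if (pci_configure_bus(pc, rs, 0, 64) != 0)
		printf("PCI bus configuration failed\n");

	pciconf_resource_fini(rs);
}
#endif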