/* $NetBSD: pci_resource.c,v 1.6 2025/03/03 19:02:30 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2022 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 /*
30 * pci_resource.c --
31 *
32 * Scan current PCI resource allocations and attempt to assign resources
33 * to devices that are not configured WITHOUT changing any configuration
34 * performed by system firmware.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: pci_resource.c,v 1.6 2025/03/03 19:02:30 riastradh Exp $");
39
40 #include <sys/param.h>
41 #include <sys/types.h>
42
43 #include <sys/bus.h>
44 #include <sys/kmem.h>
45 #include <sys/queue.h>
46 #include <sys/systm.h>
47 #include <sys/vmem.h>
48
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pcidevs.h>
52 #include <dev/pci/pci_resource.h>
53
54 #define DPRINT aprint_debug
55
56 #if defined(PCI_RESOURCE_TEST_VENDOR_ID) && \
57 defined(PCI_RESOURCE_TEST_PRODUCT_ID)
58 #define IS_TEST_DEVICE(_pd) \
59 (PCI_VENDOR(pd->pd_id) == PCI_RESOURCE_TEST_VENDOR_ID && \
60 PCI_PRODUCT(pd->pd_id) == PCI_RESOURCE_TEST_PRODUCT_ID)
61 #else
62 #define IS_TEST_DEVICE(_pd) 0
63 #endif
64
65 #define PCI_MAX_DEVICE 32
66 #define PCI_MAX_FUNC 8
67
68 #define PCI_MAX_IORES 6
69
70 #define PCI_RANGE_FOREACH(_type) \
71 for (u_int _type = PCI_RANGE_BUS; _type < NUM_PCI_RANGES; _type++)
72
73 static const char *pci_range_typenames[NUM_PCI_RANGES] = {
74 [PCI_RANGE_BUS] = "bus",
75 [PCI_RANGE_IO] = "io",
76 [PCI_RANGE_MEM] = "mem",
77 [PCI_RANGE_PMEM] = "pmem",
78 };
79
80 struct pci_bus;
81
82 struct pci_iores {
83 uint64_t pi_base; /* Base address */
84 uint64_t pi_size; /* Resource size */
85 uint8_t pi_type; /* PCI_MAPREG_TYPE_* */
86 u_int pi_bar; /* PCI bar number */
87 union {
88 struct {
89 uint8_t memtype;
90 bool prefetch;
91 } pi_mem;
92 };
93 };
94
95 struct pci_device {
96 bool pd_present; /* Device is present */
97 bool pd_configured; /* Device is configured */
98 struct pci_bus *pd_bus; /* Parent bus */
99 uint8_t pd_devno; /* Device number */
100 uint8_t pd_funcno; /* Function number */
101 pcitag_t pd_tag; /* PCI tag */
102
103 pcireg_t pd_id; /* Vendor ID, Device ID */
104 pcireg_t pd_class; /* Revision ID, Class Code */
105 pcireg_t pd_bhlc; /* BIST, Header Type, Primary Latency
106 * Timer, Cache Line Size */
107
108 struct pci_iores pd_iores[PCI_MAX_IORES];
109 u_int pd_niores;
110
111 bool pd_ppb; /* PCI-PCI bridge */
112 union {
113 struct {
114 pcireg_t bridge_bus;
115 struct pci_resource_arena *ranges[NUM_PCI_RANGES];
116 } pd_bridge;
117 };
118 };
119
120 struct pci_bus {
121 uint8_t pb_busno; /* Bus number */
122 struct pci_device *pb_bridge; /* Parent bridge, or NULL */
123
124 struct pci_device pb_device[PCI_MAX_DEVICE * PCI_MAX_FUNC];
125 /* Devices on bus */
126 u_int pb_lastdevno; /* Last device found */
127
128 /* XXX Nothing seems to use pb_ranges? */
129 struct pci_resource_arena *pb_ranges[NUM_PCI_RANGES];
130 struct pci_resource_arena *pb_res[NUM_PCI_RANGES];
131 };
132
133 struct pci_resources {
134 struct pci_bus **pr_bus; /* Bus list */
135 pci_chipset_tag_t pr_pc; /* Chipset tag */
136 uint8_t pr_startbus; /* First bus number */
137 uint8_t pr_endbus; /* Last bus number */
138
139 struct pci_resource_arena *pr_ranges[NUM_PCI_RANGES];
140 };
141
142 struct pci_resource_arena {
143 vmem_t *vmem;
144 SLIST_HEAD(, pci_resource_range) list;
145 };
146
147 struct pci_resource_range {
148 uint64_t start;
149 uint64_t end;
150 SLIST_ENTRY(pci_resource_range) entry;
151 };
152
153 static int pci_resource_scan_bus(struct pci_resources *,
154 struct pci_device *, uint8_t);
155
156 #define PCI_SBDF_FMT "%04x:%02x:%02x.%u"
157 #define PCI_SBDF_FMT_ARGS(_pr, _pd) \
158 pci_get_segment((_pr)->pr_pc), \
159 (_pd)->pd_bus->pb_busno, \
160 (_pd)->pd_devno, \
161 (_pd)->pd_funcno
162
163 #define PCICONF_RES_BUS(_pr, _busno) \
164 ((_pr)->pr_bus[(_busno) - (_pr)->pr_startbus])
165 #define PCICONF_BUS_DEVICE(_pb, _devno, _funcno) \
166 (&(_pb)->pb_device[(_devno) * PCI_MAX_FUNC + (_funcno)])
167
/*
 * pci_resource_arena_add_range --
 *
 *	Record the inclusive range [start, end] of resource type 'type'
 *	in the caller's arena array, creating the arena (vmem pool plus
 *	sorted list) on first use.  The vmem pool is used for later
 *	allocation; the list exists so ranges can be iterated in
 *	ascending order for printing.
 */
static void
pci_resource_arena_add_range(struct pci_resource_arena **arenas,
    enum pci_range_type type, uint64_t start, uint64_t end)
{
	struct pci_resource_arena *arena;
	struct pci_resource_range *new, *range, *prev;
	int error;

	/*
	 * Create an arena if we haven't already.
	 */
	if ((arena = arenas[type]) == NULL) {
		arena = arenas[type] = kmem_zalloc(sizeof(*arenas[type]),
		    KM_SLEEP);
		arena->vmem = vmem_create(pci_resource_typename(type),
		    0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
		SLIST_INIT(&arena->list);
	}

	/*
	 * Warn if this is a bus range and there already is a bus
	 * range, or if the start/end are bad.  The other types of
	 * ranges can have more than one range and larger addresses.
	 *
	 * XXX Not accurate: some machines do have multiple bus ranges.
	 * But currently this logic can't handle that -- requires some
	 * extra work to iterate over all the bus ranges.  TBD.
	 */
	if (type == PCI_RANGE_BUS &&
	    (start > UINT8_MAX || end > UINT8_MAX ||
		!SLIST_EMPTY(&arena->list))) {
		aprint_error("PCI: unexpected bus range"
		    " %" PRIu64 "-%" PRIu64 ", ignoring\n",
		    start, end);
		return;
	}

	/*
	 * Reserve the range in the vmem for allocation.  If there's
	 * already an overlapping range, just drop this one.
	 */
	error = vmem_add(arena->vmem, start, end - start + 1, VM_SLEEP);
	if (error) {
		/* XXX show some more context */
		aprint_error("overlapping %s range: %#" PRIx64 "-%#" PRIx64 ","
		    " discarding\n",
		    pci_resource_typename(type), start, end);
		return;
	}

	/*
	 * Add an entry to the list so we can iterate over them, in
	 * ascending address order for the sake of legible printing.
	 * (We don't expect to have so many entries that the linear
	 * time of insertion will cause trouble.)
	 */
	new = kmem_zalloc(sizeof(*new), KM_SLEEP);
	new->start = start;
	new->end = end;
	prev = NULL;
	SLIST_FOREACH(range, &arena->list, entry) {
		/* Stop at the first entry that starts after us. */
		if (new->start < range->start)
			break;
		prev = range;
	}
	if (prev) {
		SLIST_INSERT_AFTER(prev, new, entry);
	} else {
		SLIST_INSERT_HEAD(&arena->list, new, entry);
	}
}
239
240 /*
241 * pci_resource_add_range --
242 *
243 * Add a contiguous range of addresses (inclusive of both bounds) for
244 * the specified type of resource.
245 */
246 void
247 pci_resource_add_range(struct pci_resource_info *info,
248 enum pci_range_type type, uint64_t start, uint64_t end)
249 {
250
251 pci_resource_arena_add_range(info->ranges, type, start, end);
252 }
253
254 /*
255 * pci_new_bus --
256 *
257 * Create a new PCI bus and initialize its resource ranges.
258 */
259 static struct pci_bus *
260 pci_new_bus(struct pci_resources *pr, uint8_t busno, struct pci_device *bridge)
261 {
262 struct pci_bus *pb;
263 struct pci_resource_arena **ranges;
264
265 pb = kmem_zalloc(sizeof(*pb), KM_SLEEP);
266 pb->pb_busno = busno;
267 pb->pb_bridge = bridge;
268 if (bridge == NULL) {
269 /*
270 * No additional constraints on resource allocations for
271 * the root bus.
272 */
273 ranges = pr->pr_ranges;
274 } else {
275 /*
276 * Resource allocations for this bus are constrained by the
277 * bridge forwarding settings.
278 */
279 ranges = bridge->pd_bridge.ranges;
280 }
281 memcpy(pb->pb_ranges, ranges, sizeof(pb->pb_ranges));
282
283 return pb;
284 }
285
286 /*
287 * pci_resource_device_functions --
288 *
289 * Returns the number of PCI functions for a a given bus and device.
290 */
291 static uint8_t
292 pci_resource_device_functions(struct pci_resources *pr,
293 uint8_t busno, uint8_t devno)
294 {
295 struct pci_bus *pb;
296 struct pci_device *pd;
297
298 pb = PCICONF_RES_BUS(pr, busno);
299 pd = PCICONF_BUS_DEVICE(pb, devno, 0);
300 if (!pd->pd_present) {
301 return 0;
302 }
303
304 return PCI_HDRTYPE_MULTIFN(pd->pd_bhlc) ? 8 : 1;
305 }
306
307 /*
308 * pci_resource_device_print --
309 *
310 * Log details about a device.
311 */
312 static void
313 pci_resource_device_print(struct pci_resources *pr,
314 struct pci_device *pd)
315 {
316 struct pci_iores *pi;
317 struct pci_resource_range *range;
318 u_int res;
319
320 DPRINT("PCI: " PCI_SBDF_FMT " %04x:%04x %02x 0x%06x",
321 PCI_SBDF_FMT_ARGS(pr, pd),
322 PCI_VENDOR(pd->pd_id), PCI_PRODUCT(pd->pd_id),
323 PCI_REVISION(pd->pd_class), (pd->pd_class >> 8) & 0xffffff);
324
325 switch (PCI_HDRTYPE_TYPE(pd->pd_bhlc)) {
326 case PCI_HDRTYPE_DEVICE:
327 DPRINT(" (device)\n");
328 break;
329 case PCI_HDRTYPE_PPB:
330 DPRINT(" (bridge %u -> %u-%u)\n",
331 PCI_BRIDGE_BUS_NUM_PRIMARY(pd->pd_bridge.bridge_bus),
332 PCI_BRIDGE_BUS_NUM_SECONDARY(pd->pd_bridge.bridge_bus),
333 PCI_BRIDGE_BUS_NUM_SUBORDINATE(pd->pd_bridge.bridge_bus));
334
335 if (pd->pd_bridge.ranges[PCI_RANGE_IO]) {
336 SLIST_FOREACH(range,
337 &pd->pd_bridge.ranges[PCI_RANGE_IO]->list,
338 entry) {
339 DPRINT("PCI: " PCI_SBDF_FMT
340 " [bridge] window io "
341 " %#" PRIx64 "-%#" PRIx64
342 "\n",
343 PCI_SBDF_FMT_ARGS(pr, pd),
344 range->start,
345 range->end);
346 }
347 }
348 if (pd->pd_bridge.ranges[PCI_RANGE_MEM]) {
349 SLIST_FOREACH(range,
350 &pd->pd_bridge.ranges[PCI_RANGE_MEM]->list,
351 entry) {
352 DPRINT("PCI: " PCI_SBDF_FMT
353 " [bridge] window mem"
354 " %#" PRIx64 "-%#" PRIx64
355 " (non-prefetchable)\n",
356 PCI_SBDF_FMT_ARGS(pr, pd),
357 range->start,
358 range->end);
359 }
360 }
361 if (pd->pd_bridge.ranges[PCI_RANGE_PMEM]) {
362 SLIST_FOREACH(range,
363 &pd->pd_bridge.ranges[PCI_RANGE_PMEM]->list,
364 entry) {
365 DPRINT("PCI: " PCI_SBDF_FMT
366 " [bridge] window mem"
367 " %#" PRIx64 "-%#" PRIx64
368 " (prefetchable)\n",
369 PCI_SBDF_FMT_ARGS(pr, pd),
370 range->start,
371 range->end);
372 }
373 }
374
375 break;
376 default:
377 DPRINT(" (0x%02x)\n", PCI_HDRTYPE_TYPE(pd->pd_bhlc));
378 }
379
380 for (res = 0; res < pd->pd_niores; res++) {
381 pi = &pd->pd_iores[res];
382
383 DPRINT("PCI: " PCI_SBDF_FMT
384 " [device] resource BAR%u: %s @ %#" PRIx64 " size %#"
385 PRIx64,
386 PCI_SBDF_FMT_ARGS(pr, pd), pi->pi_bar,
387 pi->pi_type == PCI_MAPREG_TYPE_MEM ? "mem" : "io ",
388 pi->pi_base, pi->pi_size);
389
390 if (pi->pi_type == PCI_MAPREG_TYPE_MEM) {
391 switch (pi->pi_mem.memtype) {
392 case PCI_MAPREG_MEM_TYPE_32BIT:
393 DPRINT(", 32-bit");
394 break;
395 case PCI_MAPREG_MEM_TYPE_32BIT_1M:
396 DPRINT(", 32-bit (1M)");
397 break;
398 case PCI_MAPREG_MEM_TYPE_64BIT:
399 DPRINT(", 64-bit");
400 break;
401 }
402 DPRINT(" %sprefetchable",
403 pi->pi_mem.prefetch ? "" : "non-");
404 }
405 DPRINT("\n");
406 }
407 }
408
409 /*
410 * pci_resource_scan_bar --
411 *
412 * Determine the current BAR configuration for a given device.
413 */
414 static void
415 pci_resource_scan_bar(struct pci_resources *pr,
416 struct pci_device *pd, pcireg_t mapreg_start, pcireg_t mapreg_end,
417 bool is_ppb)
418 {
419 pci_chipset_tag_t pc = pr->pr_pc;
420 pcitag_t tag = pd->pd_tag;
421 pcireg_t mapreg = mapreg_start;
422 pcireg_t ocmd, cmd, bar[2], mask[2];
423 uint64_t addr, size;
424 struct pci_iores *pi;
425
426 if (!is_ppb) {
427 ocmd = cmd = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
428 cmd &= ~(PCI_COMMAND_MASTER_ENABLE |
429 PCI_COMMAND_MEM_ENABLE |
430 PCI_COMMAND_IO_ENABLE);
431 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, cmd);
432 }
433
434 while (mapreg < mapreg_end) {
435 u_int width = 4;
436
437 bar[0] = pci_conf_read(pc, tag, mapreg);
438 pci_conf_write(pc, tag, mapreg, 0xffffffff);
439 mask[0] = pci_conf_read(pc, tag, mapreg);
440 pci_conf_write(pc, tag, mapreg, bar[0]);
441
442 switch (PCI_MAPREG_TYPE(mask[0])) {
443 case PCI_MAPREG_TYPE_MEM:
444 switch (PCI_MAPREG_MEM_TYPE(mask[0])) {
445 case PCI_MAPREG_MEM_TYPE_32BIT:
446 case PCI_MAPREG_MEM_TYPE_32BIT_1M:
447 size = PCI_MAPREG_MEM_SIZE(mask[0]);
448 addr = PCI_MAPREG_MEM_ADDR(bar[0]);
449 break;
450 case PCI_MAPREG_MEM_TYPE_64BIT:
451 bar[1] = pci_conf_read(pc, tag, mapreg + 4);
452 pci_conf_write(pc, tag, mapreg + 4, 0xffffffff);
453 mask[1] = pci_conf_read(pc, tag, mapreg + 4);
454 pci_conf_write(pc, tag, mapreg + 4, bar[1]);
455
456 size = PCI_MAPREG_MEM64_SIZE(
457 ((uint64_t)mask[1] << 32) | mask[0]);
458 addr = PCI_MAPREG_MEM64_ADDR(
459 ((uint64_t)bar[1] << 32) | bar[0]);
460 width = 8;
461 break;
462 default:
463 size = 0;
464 }
465 if (size > 0) {
466 pi = &pd->pd_iores[pd->pd_niores++];
467 pi->pi_type = PCI_MAPREG_TYPE_MEM;
468 pi->pi_base = addr;
469 pi->pi_size = size;
470 pi->pi_bar = (mapreg - mapreg_start) / 4;
471 pi->pi_mem.memtype =
472 PCI_MAPREG_MEM_TYPE(mask[0]);
473 pi->pi_mem.prefetch =
474 PCI_MAPREG_MEM_PREFETCHABLE(mask[0]);
475 }
476 break;
477 case PCI_MAPREG_TYPE_IO:
478 size = PCI_MAPREG_IO_SIZE(mask[0] | 0xffff0000);
479 addr = PCI_MAPREG_IO_ADDR(bar[0]);
480 if (size > 0) {
481 pi = &pd->pd_iores[pd->pd_niores++];
482 pi->pi_type = PCI_MAPREG_TYPE_IO;
483 pi->pi_base = addr;
484 pi->pi_size = size;
485 pi->pi_bar = (mapreg - mapreg_start) / 4;
486 }
487 break;
488 }
489
490 KASSERT(pd->pd_niores <= PCI_MAX_IORES);
491
492 mapreg += width;
493 }
494
495 if (!is_ppb) {
496 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, ocmd);
497 }
498 }
499
500 /*
501 * pci_resource_scan_bridge --
502 *
503 * Determine the current configuration of a PCI-PCI bridge.
504 */
505 static void
506 pci_resource_scan_bridge(struct pci_resources *pr,
507 struct pci_device *pd)
508 {
509 pci_chipset_tag_t pc = pr->pr_pc;
510 pcitag_t tag = pd->pd_tag;
511 pcireg_t res, reshigh;
512 uint64_t iostart, ioend;
513 uint64_t memstart, memend;
514 uint64_t pmemstart, pmemend;
515
516 pd->pd_ppb = true;
517
518 res = pci_conf_read(pc, tag, PCI_BRIDGE_BUS_REG);
519 pd->pd_bridge.bridge_bus = res;
520 pci_resource_arena_add_range(pd->pd_bridge.ranges,
521 PCI_RANGE_BUS,
522 PCI_BRIDGE_BUS_NUM_SECONDARY(res),
523 PCI_BRIDGE_BUS_NUM_SUBORDINATE(res));
524
525 res = pci_conf_read(pc, tag, PCI_BRIDGE_STATIO_REG);
526 iostart = PCI_BRIDGE_STATIO_IOBASE_ADDR(res);
527 ioend = PCI_BRIDGE_STATIO_IOLIMIT_ADDR(res);
528 if (PCI_BRIDGE_IO_32BITS(res)) {
529 reshigh = pci_conf_read(pc, tag, PCI_BRIDGE_IOHIGH_REG);
530 iostart |= __SHIFTOUT(reshigh, PCI_BRIDGE_IOHIGH_BASE) << 16;
531 ioend |= __SHIFTOUT(reshigh, PCI_BRIDGE_IOHIGH_LIMIT) << 16;
532 }
533 if (iostart < ioend) {
534 pci_resource_arena_add_range(pd->pd_bridge.ranges,
535 PCI_RANGE_IO, iostart, ioend);
536 }
537
538 res = pci_conf_read(pc, tag, PCI_BRIDGE_MEMORY_REG);
539 memstart = PCI_BRIDGE_MEMORY_BASE_ADDR(res);
540 memend = PCI_BRIDGE_MEMORY_LIMIT_ADDR(res);
541 if (memstart < memend) {
542 pci_resource_arena_add_range(pd->pd_bridge.ranges,
543 PCI_RANGE_MEM, memstart, memend);
544 }
545
546 res = pci_conf_read(pc, tag, PCI_BRIDGE_PREFETCHMEM_REG);
547 pmemstart = PCI_BRIDGE_PREFETCHMEM_BASE_ADDR(res);
548 pmemend = PCI_BRIDGE_PREFETCHMEM_LIMIT_ADDR(res);
549 if (PCI_BRIDGE_PREFETCHMEM_64BITS(res)) {
550 reshigh = pci_conf_read(pc, tag,
551 PCI_BRIDGE_PREFETCHBASEUP32_REG);
552 pmemstart |= (uint64_t)reshigh << 32;
553 reshigh = pci_conf_read(pc, tag,
554 PCI_BRIDGE_PREFETCHLIMITUP32_REG);
555 pmemend |= (uint64_t)reshigh << 32;
556 }
557 if (pmemstart < pmemend) {
558 pci_resource_arena_add_range(pd->pd_bridge.ranges,
559 PCI_RANGE_PMEM, pmemstart, pmemend);
560 }
561 }
562
563 /*
564 * pci_resource_scan_device --
565 *
566 * Determine the current configuration of a PCI device.
567 */
568 static bool
569 pci_resource_scan_device(struct pci_resources *pr,
570 struct pci_bus *parent_bus, uint8_t devno, uint8_t funcno)
571 {
572 struct pci_device *pd;
573 pcitag_t tag;
574 pcireg_t id, bridge_bus;
575 uint8_t sec_bus;
576
577 tag = pci_make_tag(pr->pr_pc, parent_bus->pb_busno, devno, funcno);
578 id = pci_conf_read(pr->pr_pc, tag, PCI_ID_REG);
579 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) {
580 return false;
581 }
582
583 pd = PCICONF_BUS_DEVICE(parent_bus, devno, funcno);
584 pd->pd_present = true;
585 pd->pd_bus = parent_bus;
586 pd->pd_tag = tag;
587 pd->pd_devno = devno;
588 pd->pd_funcno = funcno;
589 pd->pd_id = id;
590 pd->pd_class = pci_conf_read(pr->pr_pc, tag, PCI_CLASS_REG);
591 pd->pd_bhlc = pci_conf_read(pr->pr_pc, tag, PCI_BHLC_REG);
592
593 switch (PCI_HDRTYPE_TYPE(pd->pd_bhlc)) {
594 case PCI_HDRTYPE_DEVICE:
595 pci_resource_scan_bar(pr, pd, PCI_MAPREG_START,
596 PCI_MAPREG_END, false);
597 break;
598 case PCI_HDRTYPE_PPB:
599 pci_resource_scan_bar(pr, pd, PCI_MAPREG_START,
600 PCI_MAPREG_PPB_END, true);
601 pci_resource_scan_bridge(pr, pd);
602 break;
603 }
604
605 pci_resource_device_print(pr, pd);
606
607 if (PCI_HDRTYPE_TYPE(pd->pd_bhlc) == PCI_HDRTYPE_PPB &&
608 PCI_CLASS(pd->pd_class) == PCI_CLASS_BRIDGE &&
609 PCI_SUBCLASS(pd->pd_class) == PCI_SUBCLASS_BRIDGE_PCI) {
610 bridge_bus = pci_conf_read(pr->pr_pc, tag, PCI_BRIDGE_BUS_REG);
611 sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(bridge_bus);
612 if (sec_bus <= pr->pr_endbus) {
613 if (pci_resource_scan_bus(pr, pd, sec_bus) != 0) {
614 DPRINT("PCI: " PCI_SBDF_FMT " bus %u "
615 "already scanned (firmware bug!)\n",
616 PCI_SBDF_FMT_ARGS(pr, pd), sec_bus);
617 }
618 }
619 }
620
621 return true;
622 }
623
624 /*
625 * pci_resource_scan_bus --
626 *
627 * Enumerate devices on a bus, recursively.
628 */
629 static int
630 pci_resource_scan_bus(struct pci_resources *pr,
631 struct pci_device *bridge_dev, uint8_t busno)
632 {
633 struct pci_bus *pb;
634 uint8_t devno, funcno;
635 uint8_t nfunc;
636
637 KASSERT(busno >= pr->pr_startbus);
638 KASSERT(busno <= pr->pr_endbus);
639
640 if (PCICONF_RES_BUS(pr, busno) != NULL) {
641 /*
642 * Firmware has configured more than one bridge with the
643 * same secondary bus number.
644 */
645 return EINVAL;
646 }
647
648 pb = pci_new_bus(pr, busno, bridge_dev);
649 PCICONF_RES_BUS(pr, busno) = pb;
650
651 for (devno = 0; devno < PCI_MAX_DEVICE; devno++) {
652 if (!pci_resource_scan_device(pr, pb, devno, 0)) {
653 continue;
654 }
655 pb->pb_lastdevno = devno;
656
657 nfunc = pci_resource_device_functions(pr, busno, devno);
658 for (funcno = 1; funcno < nfunc; funcno++) {
659 pci_resource_scan_device(pr, pb, devno, funcno);
660 }
661 }
662
663 return 0;
664 }
665
666 /*
667 * pci_resource_claim --
668 *
669 * Claim a resource from a vmem arena. This is called to inform the
670 * resource manager about resources already configured by system firmware.
671 */
672 static int
673 pci_resource_claim(struct pci_resource_arena *arena,
674 vmem_addr_t start, vmem_addr_t end)
675 {
676 KASSERT(end >= start);
677
678 return vmem_xalloc(arena->vmem, end - start + 1, 0, 0, 0, start, end,
679 VM_BESTFIT | VM_NOSLEEP, NULL);
680 }
681
682 /*
683 * pci_resource_alloc --
684 *
685 * Allocate a resource from a vmem arena. This is called when configuring
686 * devices that were not already configured by system firmware.
687 */
688 static int
689 pci_resource_alloc(struct pci_resource_arena *arena, vmem_size_t size,
690 vmem_size_t align,
691 uint64_t *base)
692 {
693 vmem_addr_t addr;
694 int error;
695
696 KASSERT(size != 0);
697
698 error = vmem_xalloc(arena->vmem, size, align, 0, 0, VMEM_ADDR_MIN,
699 VMEM_ADDR_MAX, VM_BESTFIT | VM_NOSLEEP, &addr);
700 if (error == 0) {
701 *base = (uint64_t)addr;
702 }
703
704 return error;
705 }
706
707 /*
708 * pci_resource_init_device --
709 *
710 * Discover resources assigned by system firmware, notify the resource
711 * manager of these ranges, and determine if the device has additional
712 * resources that need to be allocated.
713 */
714 static void
715 pci_resource_init_device(struct pci_resources *pr,
716 struct pci_device *pd)
717 {
718 struct pci_iores *pi;
719 struct pci_bus *pb = pd->pd_bus;
720 struct pci_resource_arena *res_io = pb->pb_res[PCI_RANGE_IO];
721 struct pci_resource_arena *res_mem = pb->pb_res[PCI_RANGE_MEM];
722 struct pci_resource_arena *res_pmem = pb->pb_res[PCI_RANGE_PMEM];
723 pcireg_t cmd;
724 u_int enabled, required;
725 u_int iores;
726 int error;
727
728 KASSERT(pd->pd_present);
729
730 if (IS_TEST_DEVICE(pd)) {
731 cmd = pci_conf_read(pr->pr_pc, pd->pd_tag,
732 PCI_COMMAND_STATUS_REG);
733 cmd &= ~(PCI_COMMAND_MEM_ENABLE|PCI_COMMAND_IO_ENABLE|
734 PCI_COMMAND_MASTER_ENABLE);
735 pci_conf_write(pr->pr_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
736 cmd);
737 }
738
739 enabled = required = 0;
740 cmd = pci_conf_read(pr->pr_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG);
741 if ((cmd & PCI_COMMAND_MEM_ENABLE) != 0) {
742 enabled |= __BIT(PCI_MAPREG_TYPE_MEM);
743 }
744 if ((cmd & PCI_COMMAND_IO_ENABLE) != 0) {
745 enabled |= __BIT(PCI_MAPREG_TYPE_IO);
746 }
747
748 for (iores = 0; iores < pd->pd_niores; iores++) {
749 pi = &pd->pd_iores[iores];
750
751 required |= __BIT(pi->pi_type);
752
753 if (IS_TEST_DEVICE(pd)) {
754 pci_conf_write(pr->pr_pc, pd->pd_tag,
755 PCI_BAR(pi->pi_bar), 0);
756 continue;
757 }
758 if ((enabled & __BIT(pi->pi_type)) == 0) {
759 continue;
760 }
761
762 if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
763 error = res_io == NULL ? ERANGE :
764 pci_resource_claim(res_io, pi->pi_base,
765 pi->pi_base + pi->pi_size - 1);
766 if (error) {
767 DPRINT("PCI: " PCI_SBDF_FMT " [device] io "
768 " %#" PRIx64 "-%#" PRIx64
769 " invalid (%d)\n",
770 PCI_SBDF_FMT_ARGS(pr, pd),
771 pi->pi_base,
772 pi->pi_base + pi->pi_size - 1,
773 error);
774 }
775 continue;
776 }
777
778 KASSERT(pi->pi_type == PCI_MAPREG_TYPE_MEM);
779 error = ERANGE;
780 if (pi->pi_mem.prefetch) {
781 /*
782 * Prefetchable memory must be allocated from the
783 * bridge's prefetchable region.
784 */
785 if (res_pmem != NULL) {
786 error = pci_resource_claim(res_pmem, pi->pi_base,
787 pi->pi_base + pi->pi_size - 1);
788 }
789 } else if (pi->pi_mem.memtype == PCI_MAPREG_MEM_TYPE_64BIT) {
790 /*
791 * Non-prefetchable 64-bit memory can be allocated from
792 * any range. Prefer allocations from the prefetchable
793 * region to save 32-bit only resources for 32-bit BARs.
794 */
795 if (res_pmem != NULL) {
796 error = pci_resource_claim(res_pmem, pi->pi_base,
797 pi->pi_base + pi->pi_size - 1);
798 }
799 if (error && res_mem != NULL) {
800 error = pci_resource_claim(res_mem, pi->pi_base,
801 pi->pi_base + pi->pi_size - 1);
802 }
803 } else {
804 /*
805 * Non-prefetchable 32-bit memory can be allocated from
806 * any range, provided that the range is below 4GB. Try
807 * the non-prefetchable range first, and if that fails,
808 * make one last attempt at allocating from the
809 * prefetchable range in case the platform provides
810 * memory below 4GB.
811 */
812 if (res_mem != NULL) {
813 error = pci_resource_claim(res_mem, pi->pi_base,
814 pi->pi_base + pi->pi_size - 1);
815 }
816 if (error && res_pmem != NULL) {
817 error = pci_resource_claim(res_pmem, pi->pi_base,
818 pi->pi_base + pi->pi_size - 1);
819 }
820 }
821 if (error) {
822 DPRINT("PCI: " PCI_SBDF_FMT " [device] mem"
823 " (%sprefetchable)"
824 " %#" PRIx64 "-%#" PRIx64
825 " invalid (%d)\n",
826 PCI_SBDF_FMT_ARGS(pr, pd),
827 pi->pi_mem.prefetch ? "" : "non-",
828 pi->pi_base,
829 pi->pi_base + pi->pi_size - 1,
830 error);
831 }
832 }
833
834 pd->pd_configured = (enabled & required) == required;
835
836 if (!pd->pd_configured) {
837 DPRINT("PCI: " PCI_SBDF_FMT " [device] "
838 "not configured by firmware\n",
839 PCI_SBDF_FMT_ARGS(pr, pd));
840 }
841 }
842
843 /*
844 * pci_resource_init_bus --
845 *
846 * Discover resources in use on a given bus, recursively.
847 */
848 static void
849 pci_resource_init_bus(struct pci_resources *pr, uint8_t busno)
850 {
851 struct pci_bus *pb, *parent_bus;
852 struct pci_device *pd, *bridge;
853 uint8_t devno, funcno;
854 uint8_t nfunc;
855 int error;
856
857 KASSERT(busno >= pr->pr_startbus);
858 KASSERT(busno <= pr->pr_endbus);
859
860 pb = PCICONF_RES_BUS(pr, busno);
861 bridge = pb->pb_bridge;
862
863 KASSERT(pb != NULL);
864 KASSERT((busno == pr->pr_startbus) == (bridge == NULL));
865
866 if (bridge == NULL) {
867 /* Use resources provided by firmware. */
868 PCI_RANGE_FOREACH(prtype) {
869 pb->pb_res[prtype] = pr->pr_ranges[prtype];
870 pr->pr_ranges[prtype] = NULL;
871 }
872 } else {
873 /*
874 * Using the resources configured in to the bridge by
875 * firmware, claim the resources on the parent bus and
876 * create a new vmem arena for the secondary bus.
877 */
878 KASSERT(bridge->pd_bus != NULL);
879 parent_bus = bridge->pd_bus;
880 PCI_RANGE_FOREACH(prtype) {
881 struct pci_resource_range *range;
882
883 if (parent_bus->pb_res[prtype] == NULL ||
884 bridge->pd_bridge.ranges[prtype] == NULL) {
885 continue;
886 }
887 SLIST_FOREACH(range,
888 &bridge->pd_bridge.ranges[prtype]->list,
889 entry) {
890 error = pci_resource_claim(
891 parent_bus->pb_res[prtype],
892 range->start, range->end);
893 if (error) {
894 DPRINT("PCI: " PCI_SBDF_FMT
895 " bridge (bus %u)"
896 " %-4s %#" PRIx64 "-%#" PRIx64
897 " invalid\n",
898 PCI_SBDF_FMT_ARGS(pr, bridge),
899 busno,
900 pci_resource_typename(prtype),
901 range->start, range->end);
902 continue;
903 }
904 pci_resource_arena_add_range(
905 pb->pb_res, prtype,
906 range->start, range->end);
907 KASSERT(pb->pb_res[prtype] != NULL);
908 }
909 }
910 }
911
912 for (devno = 0; devno <= pb->pb_lastdevno; devno++) {
913 KASSERT(devno < PCI_MAX_DEVICE);
914 nfunc = pci_resource_device_functions(pr, busno, devno);
915 for (funcno = 0; funcno < nfunc; funcno++) {
916 pd = PCICONF_BUS_DEVICE(pb, devno, funcno);
917 if (!pd->pd_present) {
918 continue;
919 }
920 if (pd->pd_ppb) {
921 uint8_t sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(
922 pd->pd_bridge.bridge_bus);
923 pci_resource_init_bus(pr, sec_bus);
924 }
925 pci_resource_init_device(pr, pd);
926 }
927 }
928 }
929
930 /*
931 * pci_resource_probe --
932 *
933 * Scan for PCI devices and initialize the resource manager.
934 */
935 static void
936 pci_resource_probe(struct pci_resources *pr,
937 const struct pci_resource_info *info)
938 {
939 struct pci_resource_arena *busarena = info->ranges[PCI_RANGE_BUS];
940 struct pci_resource_range *busrange = SLIST_FIRST(&busarena->list);
941 uint8_t startbus = (uint8_t)busrange->start;
942 uint8_t endbus = (uint8_t)busrange->end;
943 u_int nbus;
944
945 KASSERT(startbus <= endbus);
946 KASSERT(pr->pr_bus == NULL);
947
948 nbus = endbus - startbus + 1;
949
950 pr->pr_pc = info->pc;
951 pr->pr_startbus = startbus;
952 pr->pr_endbus = endbus;
953 pr->pr_bus = kmem_zalloc(nbus * sizeof(struct pci_bus *), KM_SLEEP);
954 memcpy(pr->pr_ranges, info->ranges, sizeof(pr->pr_ranges));
955
956 /* Scan devices */
957 pci_resource_scan_bus(pr, NULL, pr->pr_startbus);
958
959 /*
960 * Create per-bus resource pools and remove ranges that are already
961 * in use by devices and downstream bridges.
962 */
963 pci_resource_init_bus(pr, pr->pr_startbus);
964 }
965
966 /*
967 * pci_resource_alloc_device --
968 *
969 * Attempt to allocate resources for a given device.
970 */
971 static void
972 pci_resource_alloc_device(struct pci_resources *pr, struct pci_device *pd)
973 {
974 struct pci_iores *pi;
975 struct pci_resource_arena *arena;
976 pcireg_t cmd, ocmd, base;
977 uint64_t addr;
978 u_int enabled;
979 u_int res;
980 u_int align;
981 int error;
982
983 enabled = 0;
984 ocmd = cmd = pci_conf_read(pr->pr_pc, pd->pd_tag,
985 PCI_COMMAND_STATUS_REG);
986 if ((cmd & PCI_COMMAND_MEM_ENABLE) != 0) {
987 enabled |= __BIT(PCI_MAPREG_TYPE_MEM);
988 }
989 if ((cmd & PCI_COMMAND_IO_ENABLE) != 0) {
990 enabled |= __BIT(PCI_MAPREG_TYPE_IO);
991 }
992
993 for (res = 0; res < pd->pd_niores; res++) {
994 pi = &pd->pd_iores[res];
995
996 if ((enabled & __BIT(pi->pi_type)) != 0) {
997 continue;
998 }
999
1000 if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
1001 arena = pd->pd_bus->pb_res[PCI_RANGE_IO];
1002 align = uimax(pi->pi_size, 4);
1003 } else {
1004 KASSERT(pi->pi_type == PCI_MAPREG_TYPE_MEM);
1005 arena = NULL;
1006 align = uimax(pi->pi_size, 16);
1007 if (pi->pi_mem.prefetch) {
1008 arena = pd->pd_bus->pb_res[PCI_RANGE_PMEM];
1009 }
1010 if (arena == NULL) {
1011 arena = pd->pd_bus->pb_res[PCI_RANGE_MEM];
1012 }
1013 }
1014 if (arena == NULL) {
1015 DPRINT("PCI: " PCI_SBDF_FMT " BAR%u failed to"
1016 " allocate %#" PRIx64 " bytes (no arena)\n",
1017 PCI_SBDF_FMT_ARGS(pr, pd),
1018 pi->pi_bar, pi->pi_size);
1019 return;
1020 }
1021 error = pci_resource_alloc(arena, pi->pi_size, align, &addr);
1022 if (error != 0) {
1023 DPRINT("PCI: " PCI_SBDF_FMT " BAR%u failed to"
1024 " allocate %#" PRIx64 " bytes (no space)\n",
1025 PCI_SBDF_FMT_ARGS(pr, pd),
1026 pi->pi_bar, pi->pi_size);
1027 return;
1028 }
1029 DPRINT("PCI: " PCI_SBDF_FMT " BAR%u assigned range"
1030 " %#" PRIx64 "-%#" PRIx64 "\n",
1031 PCI_SBDF_FMT_ARGS(pr, pd),
1032 pi->pi_bar, addr, addr + pi->pi_size - 1);
1033
1034 if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
1035 cmd |= PCI_COMMAND_IO_ENABLE;
1036 pci_conf_write(pr->pr_pc, pd->pd_tag,
1037 PCI_BAR(pi->pi_bar),
1038 PCI_MAPREG_IO_ADDR(addr) | PCI_MAPREG_TYPE_IO);
1039 } else {
1040 cmd |= PCI_COMMAND_MEM_ENABLE;
1041 base = pci_conf_read(pr->pr_pc, pd->pd_tag,
1042 PCI_BAR(pi->pi_bar));
1043 base = PCI_MAPREG_MEM_ADDR(addr) |
1044 PCI_MAPREG_MEM_TYPE(base);
1045 pci_conf_write(pr->pr_pc, pd->pd_tag,
1046 PCI_BAR(pi->pi_bar), base);
1047 if (pi->pi_mem.memtype == PCI_MAPREG_MEM_TYPE_64BIT) {
1048 base = (pcireg_t)
1049 (PCI_MAPREG_MEM64_ADDR(addr) >> 32);
1050 pci_conf_write(pr->pr_pc, pd->pd_tag,
1051 PCI_BAR(pi->pi_bar + 1), base);
1052 }
1053 }
1054 }
1055
1056 if (ocmd != cmd) {
1057 pci_conf_write(pr->pr_pc, pd->pd_tag,
1058 PCI_COMMAND_STATUS_REG, cmd);
1059 }
1060 }
1061
1062 /*
1063 * pci_resource_alloc_bus --
1064 *
1065 * Attempt to assign resources to all devices on a given bus, recursively.
1066 */
1067 static void
1068 pci_resource_alloc_bus(struct pci_resources *pr, uint8_t busno)
1069 {
1070 struct pci_bus *pb = PCICONF_RES_BUS(pr, busno);
1071 struct pci_device *pd;
1072 uint8_t devno, funcno;
1073
1074 for (devno = 0; devno <= pb->pb_lastdevno; devno++) {
1075 for (funcno = 0; funcno < 8; funcno++) {
1076 pd = PCICONF_BUS_DEVICE(pb, devno, funcno);
1077 if (!pd->pd_present) {
1078 if (funcno == 0) {
1079 break;
1080 }
1081 continue;
1082 }
1083 if (!pd->pd_configured) {
1084 pci_resource_alloc_device(pr, pd);
1085 }
1086 if (pd->pd_ppb) {
1087 uint8_t sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(
1088 pd->pd_bridge.bridge_bus);
1089 pci_resource_alloc_bus(pr, sec_bus);
1090 }
1091 }
1092 }
1093 }
1094
1095 /*
1096 * pci_resource_init --
1097 *
1098 * Public interface to PCI resource manager. Scans for available devices
1099 * and assigns resources.
1100 */
1101 void
1102 pci_resource_init(const struct pci_resource_info *info)
1103 {
1104 struct pci_resources pr = {};
1105
1106 if (info->ranges[PCI_RANGE_BUS] == NULL) {
1107 aprint_error("PCI: no buses\n");
1108 return;
1109 }
1110 KASSERT(!SLIST_EMPTY(&info->ranges[PCI_RANGE_BUS]->list));
1111 pci_resource_probe(&pr, info);
1112 pci_resource_alloc_bus(&pr, pr.pr_startbus);
1113 }
1114
1115 /*
1116 * pci_resource_typename --
1117 *
1118 * Return a string description of a PCI range type.
1119 */
1120 const char *
1121 pci_resource_typename(enum pci_range_type prtype)
1122 {
1123 KASSERT(prtype < NUM_PCI_RANGES);
1124 return pci_range_typenames[prtype];
1125 }
1126