/*	$NetBSD: pci_resource.c,v 1.7 2025/03/03 19:38:43 riastradh Exp $	*/

/*-
 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * pci_resource.c --
 *
 *	Scan current PCI resource allocations and attempt to assign resources
 *	to devices that are not configured, WITHOUT changing any configuration
 *	performed by system firmware.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci_resource.c,v 1.7 2025/03/03 19:38:43 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/bus.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/vmem.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pci_resource.h>

#define	DPRINT		aprint_debug

#if defined(PCI_RESOURCE_TEST_VENDOR_ID) && \
    defined(PCI_RESOURCE_TEST_PRODUCT_ID)
#define	IS_TEST_DEVICE(_pd) \
	(PCI_VENDOR((_pd)->pd_id) == PCI_RESOURCE_TEST_VENDOR_ID && \
	 PCI_PRODUCT((_pd)->pd_id) == PCI_RESOURCE_TEST_PRODUCT_ID)
#else
#define	IS_TEST_DEVICE(_pd)	0
#endif
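/*
 * When PCI_RESOURCE_TEST_VENDOR_ID and PCI_RESOURCE_TEST_PRODUCT_ID are
 * defined at build time, the matching device has its decoding disabled and
 * its BARs cleared during the initial scan, so this module reallocates its
 * resources as if firmware had left it unconfigured.  This is intended as a
 * debugging aid for exercising the allocation paths.
 */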

#define	PCI_MAX_DEVICE	32
#define	PCI_MAX_FUNC	8

#define	PCI_MAX_IORES	6

#define	PCI_RANGE_FOREACH(_type) \
	for (u_int _type = PCI_RANGE_BUS; _type < NUM_PCI_RANGES; _type++)

static const char *pci_range_typenames[NUM_PCI_RANGES] = {
	[PCI_RANGE_BUS]  = "bus",
	[PCI_RANGE_IO]   = "io",
	[PCI_RANGE_MEM]  = "mem",
	[PCI_RANGE_PMEM] = "pmem",
};

struct pci_bus;

struct pci_iores {
	uint64_t	pi_base;	/* Base address */
	uint64_t	pi_size;	/* Resource size */
	uint8_t		pi_type;	/* PCI_MAPREG_TYPE_* */
	u_int		pi_bar;		/* PCI BAR number */
	union {
		struct {
			uint8_t	memtype;
			bool	prefetch;
		} pi_mem;
	};
};

struct pci_device {
	bool		pd_present;	/* Device is present */
	bool		pd_configured;	/* Device is configured */
	struct pci_bus	*pd_bus;	/* Parent bus */
	uint8_t		pd_devno;	/* Device number */
	uint8_t		pd_funcno;	/* Function number */
	pcitag_t	pd_tag;		/* PCI tag */

	pcireg_t	pd_id;		/* Vendor ID, Device ID */
	pcireg_t	pd_class;	/* Revision ID, Class Code */
	pcireg_t	pd_bhlc;	/* BIST, Header Type, Primary Latency
					 * Timer, Cache Line Size */

	struct pci_iores pd_iores[PCI_MAX_IORES];
	u_int		pd_niores;

	bool		pd_ppb;		/* PCI-PCI bridge */
	union {
		struct {
			pcireg_t bridge_bus;
			struct pci_resource_arena *ranges[NUM_PCI_RANGES];
		} pd_bridge;
	};
};

struct pci_bus {
	uint8_t		pb_busno;	/* Bus number */
	struct pci_device *pb_bridge;	/* Parent bridge, or NULL */

	struct pci_device pb_device[PCI_MAX_DEVICE * PCI_MAX_FUNC];
					/* Devices on bus */
	u_int		pb_lastdevno;	/* Last device found */

	/* XXX Nothing seems to use pb_ranges? */
	struct pci_resource_arena *pb_ranges[NUM_PCI_RANGES];
	struct pci_resource_arena *pb_res[NUM_PCI_RANGES];
};

struct pci_resources {
	struct pci_bus	**pr_bus;	/* Bus list */
	pci_chipset_tag_t pr_pc;	/* Chipset tag */
	uint8_t		pr_startbus;	/* First bus number */
	struct pci_resource_arena *pr_busranges;

	struct pci_resource_arena *pr_ranges[NUM_PCI_RANGES];
};

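/*
 * A resource arena pairs a vmem arena, which tracks which addresses are free
 * or in use, with a list of the ranges that were added to it, kept in
 * ascending address order so they can be reported legibly.
 */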
struct pci_resource_arena {
	vmem_t			*vmem;
	SIMPLEQ_HEAD(, pci_resource_range) list;
};

struct pci_resource_range {
	uint64_t		start;
	uint64_t		end;
	SIMPLEQ_ENTRY(pci_resource_range) entry;
};

static int	pci_resource_scan_bus(struct pci_resources *,
		    struct pci_device *, uint8_t);

#define	PCI_SBDF_FMT			"%04x:%02x:%02x.%u"
#define	PCI_SBDF_FMT_ARGS(_pr, _pd)	\
	pci_get_segment((_pr)->pr_pc),	\
	(_pd)->pd_bus->pb_busno,	\
	(_pd)->pd_devno,		\
	(_pd)->pd_funcno

#define	PCICONF_RES_BUS(_pr, _busno) \
	((_pr)->pr_bus[(_busno) - (_pr)->pr_startbus])
#define	PCICONF_BUS_DEVICE(_pb, _devno, _funcno) \
	(&(_pb)->pb_device[(_devno) * PCI_MAX_FUNC + (_funcno)])

static bool
pci_bus_in_range(struct pci_resources *pr, int busno)
{
	struct pci_resource_range *range;

	SIMPLEQ_FOREACH(range, &pr->pr_busranges->list, entry) {
		if (busno >= range->start && busno <= range->end)
			return true;
	}
	return false;
}

static void
pci_resource_arena_add_range(struct pci_resource_arena **arenas,
    enum pci_range_type type, uint64_t start, uint64_t end)
{
	struct pci_resource_arena *arena;
	struct pci_resource_range *new, *range, *prev;
	int error;

	KASSERTMSG(start <= end, "type=%d start=%" PRIu64 " end=%" PRIu64,
	    type, start, end);

	/*
	 * Warn if this is a bus range and the start/end are bad.  The
	 * other types of ranges can have larger addresses.
	 */
	if (type == PCI_RANGE_BUS &&
	    (start > UINT8_MAX || end > UINT8_MAX)) {
		aprint_error("PCI: unexpected bus range"
		    " %" PRIu64 "-%" PRIu64 ", ignoring\n",
		    start, end);
		return;
	}

	/*
	 * Create an arena if we haven't already.
	 */
	if ((arena = arenas[type]) == NULL) {
		arena = arenas[type] = kmem_zalloc(sizeof(*arenas[type]),
		    KM_SLEEP);
		arena->vmem = vmem_create(pci_resource_typename(type),
		    0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
		SIMPLEQ_INIT(&arena->list);
	}

	/*
	 * Reserve the range in the vmem for allocation.  If there's
	 * already an overlapping range, just drop this one.
	 */
	error = vmem_add(arena->vmem, start, end - start + 1, VM_SLEEP);
	if (error) {
		/* XXX show some more context */
		aprint_error("overlapping %s range: %#" PRIx64 "-%#" PRIx64 ","
		    " discarding\n",
		    pci_resource_typename(type), start, end);
		return;
	}

	/*
	 * Add an entry to the list so we can iterate over them, in
	 * ascending address order for the sake of legible printing.
	 * (We don't expect to have so many entries that the linear
	 * time of insertion will cause trouble.)
	 */
	new = kmem_zalloc(sizeof(*new), KM_SLEEP);
	new->start = start;
	new->end = end;
	prev = NULL;
	SIMPLEQ_FOREACH(range, &arena->list, entry) {
		if (new->start < range->start)
			break;
		KASSERT(new->start > range->end);
		prev = range;
	}
	if (prev) {
		SIMPLEQ_INSERT_AFTER(&arena->list, prev, new, entry);
	} else {
		SIMPLEQ_INSERT_HEAD(&arena->list, new, entry);
	}
}

/*
 * pci_resource_add_range --
 *
 *	Add a contiguous range of addresses (inclusive of both bounds) for
 *	the specified type of resource.
 */
void
pci_resource_add_range(struct pci_resource_info *info,
    enum pci_range_type type, uint64_t start, uint64_t end)
{

	pci_resource_arena_add_range(info->ranges, type, start, end);
}

/*
 * pci_new_bus --
 *
 *	Create a new PCI bus and initialize its resource ranges.
 */
static struct pci_bus *
pci_new_bus(struct pci_resources *pr, uint8_t busno, struct pci_device *bridge)
{
	struct pci_bus *pb;
	struct pci_resource_arena **ranges;

	pb = kmem_zalloc(sizeof(*pb), KM_SLEEP);
	pb->pb_busno = busno;
	pb->pb_bridge = bridge;
	if (bridge == NULL) {
		/*
		 * No additional constraints on resource allocations for
		 * the root bus.
		 */
		ranges = pr->pr_ranges;
	} else {
		/*
		 * Resource allocations for this bus are constrained by the
		 * bridge forwarding settings.
		 */
		ranges = bridge->pd_bridge.ranges;
	}
	memcpy(pb->pb_ranges, ranges, sizeof(pb->pb_ranges));

	return pb;
}

/*
 * pci_resource_device_functions --
 *
 *	Returns the number of PCI functions for a given bus and device.
 */
static uint8_t
pci_resource_device_functions(struct pci_resources *pr,
    uint8_t busno, uint8_t devno)
{
	struct pci_bus *pb;
	struct pci_device *pd;

	pb = PCICONF_RES_BUS(pr, busno);
	pd = PCICONF_BUS_DEVICE(pb, devno, 0);
	if (!pd->pd_present) {
		return 0;
	}

	return PCI_HDRTYPE_MULTIFN(pd->pd_bhlc) ? PCI_MAX_FUNC : 1;
}

/*
 * pci_resource_device_print --
 *
 *	Log details about a device.
 */
static void
pci_resource_device_print(struct pci_resources *pr,
    struct pci_device *pd)
{
	struct pci_iores *pi;
	struct pci_resource_range *range;
	u_int res;

	DPRINT("PCI: " PCI_SBDF_FMT " %04x:%04x %02x 0x%06x",
	    PCI_SBDF_FMT_ARGS(pr, pd),
	    PCI_VENDOR(pd->pd_id), PCI_PRODUCT(pd->pd_id),
	    PCI_REVISION(pd->pd_class), (pd->pd_class >> 8) & 0xffffff);

	switch (PCI_HDRTYPE_TYPE(pd->pd_bhlc)) {
	case PCI_HDRTYPE_DEVICE:
		DPRINT(" (device)\n");
		break;
	case PCI_HDRTYPE_PPB:
		DPRINT(" (bridge %u -> %u-%u)\n",
		    PCI_BRIDGE_BUS_NUM_PRIMARY(pd->pd_bridge.bridge_bus),
		    PCI_BRIDGE_BUS_NUM_SECONDARY(pd->pd_bridge.bridge_bus),
		    PCI_BRIDGE_BUS_NUM_SUBORDINATE(pd->pd_bridge.bridge_bus));

		if (pd->pd_bridge.ranges[PCI_RANGE_IO]) {
			SIMPLEQ_FOREACH(range,
			    &pd->pd_bridge.ranges[PCI_RANGE_IO]->list,
			    entry) {
				DPRINT("PCI: " PCI_SBDF_FMT
				    " [bridge] window io "
				    " %#" PRIx64 "-%#" PRIx64
				    "\n",
				    PCI_SBDF_FMT_ARGS(pr, pd),
				    range->start,
				    range->end);
			}
		}
		if (pd->pd_bridge.ranges[PCI_RANGE_MEM]) {
			SIMPLEQ_FOREACH(range,
			    &pd->pd_bridge.ranges[PCI_RANGE_MEM]->list,
			    entry) {
				DPRINT("PCI: " PCI_SBDF_FMT
				    " [bridge] window mem"
				    " %#" PRIx64 "-%#" PRIx64
				    " (non-prefetchable)\n",
				    PCI_SBDF_FMT_ARGS(pr, pd),
				    range->start,
				    range->end);
			}
		}
		if (pd->pd_bridge.ranges[PCI_RANGE_PMEM]) {
			SIMPLEQ_FOREACH(range,
			    &pd->pd_bridge.ranges[PCI_RANGE_PMEM]->list,
			    entry) {
				DPRINT("PCI: " PCI_SBDF_FMT
				    " [bridge] window mem"
				    " %#" PRIx64 "-%#" PRIx64
				    " (prefetchable)\n",
				    PCI_SBDF_FMT_ARGS(pr, pd),
				    range->start,
				    range->end);
			}
		}

		break;
	default:
		DPRINT(" (0x%02x)\n", PCI_HDRTYPE_TYPE(pd->pd_bhlc));
	}

	for (res = 0; res < pd->pd_niores; res++) {
		pi = &pd->pd_iores[res];

		DPRINT("PCI: " PCI_SBDF_FMT
		    " [device] resource BAR%u: %s @ %#" PRIx64 " size %#"
		    PRIx64,
		    PCI_SBDF_FMT_ARGS(pr, pd), pi->pi_bar,
		    pi->pi_type == PCI_MAPREG_TYPE_MEM ? "mem" : "io ",
		    pi->pi_base, pi->pi_size);

		if (pi->pi_type == PCI_MAPREG_TYPE_MEM) {
			switch (pi->pi_mem.memtype) {
			case PCI_MAPREG_MEM_TYPE_32BIT:
				DPRINT(", 32-bit");
				break;
			case PCI_MAPREG_MEM_TYPE_32BIT_1M:
				DPRINT(", 32-bit (1M)");
				break;
			case PCI_MAPREG_MEM_TYPE_64BIT:
				DPRINT(", 64-bit");
				break;
			}
			DPRINT(" %sprefetchable",
			    pi->pi_mem.prefetch ? "" : "non-");
		}
		DPRINT("\n");
	}
}

/*
 * pci_resource_scan_bar --
 *
 *	Determine the current BAR configuration for a given device.
 */
static void
pci_resource_scan_bar(struct pci_resources *pr,
    struct pci_device *pd, pcireg_t mapreg_start, pcireg_t mapreg_end,
    bool is_ppb)
{
	pci_chipset_tag_t pc = pr->pr_pc;
	pcitag_t tag = pd->pd_tag;
	pcireg_t mapreg = mapreg_start;
	pcireg_t ocmd, cmd, bar[2], mask[2];
	uint64_t addr, size;
	struct pci_iores *pi;

	if (!is_ppb) {
		ocmd = cmd = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		cmd &= ~(PCI_COMMAND_MASTER_ENABLE |
		    PCI_COMMAND_MEM_ENABLE |
		    PCI_COMMAND_IO_ENABLE);
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, cmd);
	}

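	/*
	 * Standard BAR sizing probe: save the BAR, write all-ones, read back
	 * the size mask, then restore the saved value.  For 64-bit memory
	 * BARs the probe covers the following dword as well.  Decoding is
	 * disabled above (for non-bridges) so the transient BAR values
	 * cannot be matched against bus cycles.
	 */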
	while (mapreg < mapreg_end) {
		u_int width = 4;

		bar[0] = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, 0xffffffff);
		mask[0] = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, bar[0]);

		switch (PCI_MAPREG_TYPE(mask[0])) {
		case PCI_MAPREG_TYPE_MEM:
			switch (PCI_MAPREG_MEM_TYPE(mask[0])) {
			case PCI_MAPREG_MEM_TYPE_32BIT:
			case PCI_MAPREG_MEM_TYPE_32BIT_1M:
				size = PCI_MAPREG_MEM_SIZE(mask[0]);
				addr = PCI_MAPREG_MEM_ADDR(bar[0]);
				break;
			case PCI_MAPREG_MEM_TYPE_64BIT:
				bar[1] = pci_conf_read(pc, tag, mapreg + 4);
				pci_conf_write(pc, tag, mapreg + 4, 0xffffffff);
				mask[1] = pci_conf_read(pc, tag, mapreg + 4);
				pci_conf_write(pc, tag, mapreg + 4, bar[1]);

				size = PCI_MAPREG_MEM64_SIZE(
				    ((uint64_t)mask[1] << 32) | mask[0]);
				addr = PCI_MAPREG_MEM64_ADDR(
				    ((uint64_t)bar[1] << 32) | bar[0]);
				width = 8;
				break;
			default:
				size = 0;
			}
			if (size > 0) {
				pi = &pd->pd_iores[pd->pd_niores++];
				pi->pi_type = PCI_MAPREG_TYPE_MEM;
				pi->pi_base = addr;
				pi->pi_size = size;
				pi->pi_bar = (mapreg - mapreg_start) / 4;
				pi->pi_mem.memtype =
				    PCI_MAPREG_MEM_TYPE(mask[0]);
				pi->pi_mem.prefetch =
				    PCI_MAPREG_MEM_PREFETCHABLE(mask[0]);
			}
			break;
		case PCI_MAPREG_TYPE_IO:
			size = PCI_MAPREG_IO_SIZE(mask[0] | 0xffff0000);
			addr = PCI_MAPREG_IO_ADDR(bar[0]);
			if (size > 0) {
				pi = &pd->pd_iores[pd->pd_niores++];
				pi->pi_type = PCI_MAPREG_TYPE_IO;
				pi->pi_base = addr;
				pi->pi_size = size;
				pi->pi_bar = (mapreg - mapreg_start) / 4;
			}
			break;
		}

		KASSERT(pd->pd_niores <= PCI_MAX_IORES);

		mapreg += width;
	}

	if (!is_ppb) {
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, ocmd);
	}
}

/*
 * pci_resource_scan_bridge --
 *
 *	Determine the current configuration of a PCI-PCI bridge.
 */
static void
pci_resource_scan_bridge(struct pci_resources *pr,
    struct pci_device *pd)
{
	pci_chipset_tag_t pc = pr->pr_pc;
	pcitag_t tag = pd->pd_tag;
	pcireg_t res, reshigh;
	uint64_t iostart, ioend;
	uint64_t memstart, memend;
	uint64_t pmemstart, pmemend;

	pd->pd_ppb = true;

	res = pci_conf_read(pc, tag, PCI_BRIDGE_BUS_REG);
	pd->pd_bridge.bridge_bus = res;
	pci_resource_arena_add_range(pd->pd_bridge.ranges,
	    PCI_RANGE_BUS,
	    PCI_BRIDGE_BUS_NUM_SECONDARY(res),
	    PCI_BRIDGE_BUS_NUM_SUBORDINATE(res));

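	/*
	 * Decode the bridge's forwarding windows.  The I/O window may be
	 * extended to 32 bits via the I/O base/limit upper 16 bits register,
	 * and the prefetchable memory window may be extended to 64 bits via
	 * the upper 32 bits registers.  Windows whose decoded base is not
	 * below the decoded limit are treated as disabled and skipped.
	 */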
	res = pci_conf_read(pc, tag, PCI_BRIDGE_STATIO_REG);
	iostart = PCI_BRIDGE_STATIO_IOBASE_ADDR(res);
	ioend = PCI_BRIDGE_STATIO_IOLIMIT_ADDR(res);
	if (PCI_BRIDGE_IO_32BITS(res)) {
		reshigh = pci_conf_read(pc, tag, PCI_BRIDGE_IOHIGH_REG);
		iostart |= __SHIFTOUT(reshigh, PCI_BRIDGE_IOHIGH_BASE) << 16;
		ioend |= __SHIFTOUT(reshigh, PCI_BRIDGE_IOHIGH_LIMIT) << 16;
	}
	if (iostart < ioend) {
		pci_resource_arena_add_range(pd->pd_bridge.ranges,
		    PCI_RANGE_IO, iostart, ioend);
	}

	res = pci_conf_read(pc, tag, PCI_BRIDGE_MEMORY_REG);
	memstart = PCI_BRIDGE_MEMORY_BASE_ADDR(res);
	memend = PCI_BRIDGE_MEMORY_LIMIT_ADDR(res);
	if (memstart < memend) {
		pci_resource_arena_add_range(pd->pd_bridge.ranges,
		    PCI_RANGE_MEM, memstart, memend);
	}

	res = pci_conf_read(pc, tag, PCI_BRIDGE_PREFETCHMEM_REG);
	pmemstart = PCI_BRIDGE_PREFETCHMEM_BASE_ADDR(res);
	pmemend = PCI_BRIDGE_PREFETCHMEM_LIMIT_ADDR(res);
	if (PCI_BRIDGE_PREFETCHMEM_64BITS(res)) {
		reshigh = pci_conf_read(pc, tag,
		    PCI_BRIDGE_PREFETCHBASEUP32_REG);
		pmemstart |= (uint64_t)reshigh << 32;
		reshigh = pci_conf_read(pc, tag,
		    PCI_BRIDGE_PREFETCHLIMITUP32_REG);
		pmemend |= (uint64_t)reshigh << 32;
	}
	if (pmemstart < pmemend) {
		pci_resource_arena_add_range(pd->pd_bridge.ranges,
		    PCI_RANGE_PMEM, pmemstart, pmemend);
	}
}

/*
 * pci_resource_scan_device --
 *
 *	Determine the current configuration of a PCI device.
 */
static bool
pci_resource_scan_device(struct pci_resources *pr,
    struct pci_bus *parent_bus, uint8_t devno, uint8_t funcno)
{
	struct pci_device *pd;
	pcitag_t tag;
	pcireg_t id, bridge_bus;
	uint8_t sec_bus;

	tag = pci_make_tag(pr->pr_pc, parent_bus->pb_busno, devno, funcno);
	id = pci_conf_read(pr->pr_pc, tag, PCI_ID_REG);
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) {
		return false;
	}

	pd = PCICONF_BUS_DEVICE(parent_bus, devno, funcno);
	pd->pd_present = true;
	pd->pd_bus = parent_bus;
	pd->pd_tag = tag;
	pd->pd_devno = devno;
	pd->pd_funcno = funcno;
	pd->pd_id = id;
	pd->pd_class = pci_conf_read(pr->pr_pc, tag, PCI_CLASS_REG);
	pd->pd_bhlc = pci_conf_read(pr->pr_pc, tag, PCI_BHLC_REG);

	switch (PCI_HDRTYPE_TYPE(pd->pd_bhlc)) {
	case PCI_HDRTYPE_DEVICE:
		pci_resource_scan_bar(pr, pd, PCI_MAPREG_START,
		    PCI_MAPREG_END, false);
		break;
	case PCI_HDRTYPE_PPB:
		pci_resource_scan_bar(pr, pd, PCI_MAPREG_START,
		    PCI_MAPREG_PPB_END, true);
		pci_resource_scan_bridge(pr, pd);
		break;
	}

	pci_resource_device_print(pr, pd);

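	/*
	 * If this function is a PCI-PCI bridge, descend into its secondary
	 * bus now, provided firmware assigned it a bus number within the
	 * available bus ranges and that bus has not been scanned already.
	 */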
	if (PCI_HDRTYPE_TYPE(pd->pd_bhlc) == PCI_HDRTYPE_PPB &&
	    PCI_CLASS(pd->pd_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pd->pd_class) == PCI_SUBCLASS_BRIDGE_PCI) {
		bridge_bus = pci_conf_read(pr->pr_pc, tag, PCI_BRIDGE_BUS_REG);
		sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(bridge_bus);
		if (pci_bus_in_range(pr, sec_bus)) {
			if (pci_resource_scan_bus(pr, pd, sec_bus) != 0) {
				DPRINT("PCI: " PCI_SBDF_FMT " bus %u "
				    "already scanned (firmware bug!)\n",
				    PCI_SBDF_FMT_ARGS(pr, pd), sec_bus);
			}
		} else {
			DPRINT("PCI: " PCI_SBDF_FMT " bus %u "
			    "out of range (firmware bug!)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd), sec_bus);
		}
	}

	return true;
}

/*
 * pci_resource_scan_bus --
 *
 *	Enumerate devices on a bus, recursively.
 */
static int
pci_resource_scan_bus(struct pci_resources *pr,
    struct pci_device *bridge_dev, uint8_t busno)
{
	struct pci_bus *pb;
	uint8_t devno, funcno;
	uint8_t nfunc;

	KASSERT(busno >= pr->pr_startbus);
	KASSERT(pci_bus_in_range(pr, busno));

	if (PCICONF_RES_BUS(pr, busno) != NULL) {
		/*
		 * Firmware has configured more than one bridge with the
		 * same secondary bus number.
		 */
		return EINVAL;
	}

	pb = pci_new_bus(pr, busno, bridge_dev);
	PCICONF_RES_BUS(pr, busno) = pb;

	for (devno = 0; devno < PCI_MAX_DEVICE; devno++) {
		if (!pci_resource_scan_device(pr, pb, devno, 0)) {
			continue;
		}
		pb->pb_lastdevno = devno;

		nfunc = pci_resource_device_functions(pr, busno, devno);
		for (funcno = 1; funcno < nfunc; funcno++) {
			pci_resource_scan_device(pr, pb, devno, funcno);
		}
	}

	return 0;
}

/*
 * pci_resource_claim --
 *
 *	Claim a resource from a vmem arena.  This is called to inform the
 *	resource manager about resources already configured by system firmware.
 */
static int
pci_resource_claim(struct pci_resource_arena *arena,
    vmem_addr_t start, vmem_addr_t end)
{
	KASSERT(end >= start);

	return vmem_xalloc(arena->vmem, end - start + 1, 0, 0, 0, start, end,
	    VM_BESTFIT | VM_NOSLEEP, NULL);
}

/*
 * pci_resource_alloc --
 *
 *	Allocate a resource from a vmem arena.  This is called when configuring
 *	devices that were not already configured by system firmware.
 */
static int
pci_resource_alloc(struct pci_resource_arena *arena, vmem_size_t size,
    vmem_size_t align,
    uint64_t *base)
{
	vmem_addr_t addr;
	int error;

	KASSERT(size != 0);

	error = vmem_xalloc(arena->vmem, size, align, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, VM_BESTFIT | VM_NOSLEEP, &addr);
	if (error == 0) {
		*base = (uint64_t)addr;
	}

	return error;
}

/*
 * pci_resource_init_device --
 *
 *	Discover resources assigned by system firmware, notify the resource
 *	manager of these ranges, and determine if the device has additional
 *	resources that need to be allocated.
 */
static void
pci_resource_init_device(struct pci_resources *pr,
    struct pci_device *pd)
{
	struct pci_iores *pi;
	struct pci_bus *pb = pd->pd_bus;
	struct pci_resource_arena *res_io = pb->pb_res[PCI_RANGE_IO];
	struct pci_resource_arena *res_mem = pb->pb_res[PCI_RANGE_MEM];
	struct pci_resource_arena *res_pmem = pb->pb_res[PCI_RANGE_PMEM];
	pcireg_t cmd;
	u_int enabled, required;
	u_int iores;
	int error;

	KASSERT(pd->pd_present);

	if (IS_TEST_DEVICE(pd)) {
		cmd = pci_conf_read(pr->pr_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		cmd &= ~(PCI_COMMAND_MEM_ENABLE|PCI_COMMAND_IO_ENABLE|
		    PCI_COMMAND_MASTER_ENABLE);
		pci_conf_write(pr->pr_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
		    cmd);
	}

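	/*
	 * A device counts as configured only if firmware enabled decoding
	 * for every resource type its BARs require: "enabled" records the
	 * types enabled in the command register, "required" the types
	 * implemented by the device's BARs.
	 */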
	enabled = required = 0;
	cmd = pci_conf_read(pr->pr_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_MEM_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_MEM);
	}
	if ((cmd & PCI_COMMAND_IO_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_IO);
	}

	for (iores = 0; iores < pd->pd_niores; iores++) {
		pi = &pd->pd_iores[iores];

		required |= __BIT(pi->pi_type);

		if (IS_TEST_DEVICE(pd)) {
			pci_conf_write(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar), 0);
			continue;
		}
		if ((enabled & __BIT(pi->pi_type)) == 0) {
			continue;
		}

		if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
			error = res_io == NULL ? ERANGE :
			    pci_resource_claim(res_io, pi->pi_base,
				pi->pi_base + pi->pi_size - 1);
			if (error) {
				DPRINT("PCI: " PCI_SBDF_FMT " [device] io "
				    " %#" PRIx64 "-%#" PRIx64
				    " invalid (%d)\n",
				    PCI_SBDF_FMT_ARGS(pr, pd),
				    pi->pi_base,
				    pi->pi_base + pi->pi_size - 1,
				    error);
			}
			continue;
		}

		KASSERT(pi->pi_type == PCI_MAPREG_TYPE_MEM);
		error = ERANGE;
		if (pi->pi_mem.prefetch) {
			/*
			 * Prefetchable memory must be allocated from the
			 * bridge's prefetchable region.
			 */
			if (res_pmem != NULL) {
				error = pci_resource_claim(res_pmem, pi->pi_base,
				    pi->pi_base + pi->pi_size - 1);
			}
		} else if (pi->pi_mem.memtype == PCI_MAPREG_MEM_TYPE_64BIT) {
			/*
			 * Non-prefetchable 64-bit memory can be allocated from
			 * any range.  Prefer allocations from the prefetchable
			 * region to save 32-bit only resources for 32-bit BARs.
			 */
			if (res_pmem != NULL) {
				error = pci_resource_claim(res_pmem, pi->pi_base,
				    pi->pi_base + pi->pi_size - 1);
			}
			if (error && res_mem != NULL) {
				error = pci_resource_claim(res_mem, pi->pi_base,
				    pi->pi_base + pi->pi_size - 1);
			}
		} else {
			/*
			 * Non-prefetchable 32-bit memory can be allocated from
			 * any range, provided that the range is below 4GB.  Try
			 * the non-prefetchable range first, and if that fails,
			 * make one last attempt at allocating from the
			 * prefetchable range in case the platform provides
			 * memory below 4GB.
			 */
			if (res_mem != NULL) {
				error = pci_resource_claim(res_mem, pi->pi_base,
				    pi->pi_base + pi->pi_size - 1);
			}
			if (error && res_pmem != NULL) {
				error = pci_resource_claim(res_pmem, pi->pi_base,
				    pi->pi_base + pi->pi_size - 1);
			}
		}
		if (error) {
			DPRINT("PCI: " PCI_SBDF_FMT " [device] mem"
			    " (%sprefetchable)"
			    " %#" PRIx64 "-%#" PRIx64
			    " invalid (%d)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pi->pi_mem.prefetch ? "" : "non-",
			    pi->pi_base,
			    pi->pi_base + pi->pi_size - 1,
			    error);
		}
	}

	pd->pd_configured = (enabled & required) == required;

	if (!pd->pd_configured) {
		DPRINT("PCI: " PCI_SBDF_FMT " [device] "
		    "not configured by firmware\n",
		    PCI_SBDF_FMT_ARGS(pr, pd));
	}
}

/*
 * pci_resource_init_bus --
 *
 *	Discover resources in use on a given bus, recursively.
 */
static void
pci_resource_init_bus(struct pci_resources *pr, uint8_t busno)
{
	struct pci_bus *pb, *parent_bus;
	struct pci_device *pd, *bridge;
	uint8_t devno, funcno;
	uint8_t nfunc;
	int error;

	KASSERT(busno >= pr->pr_startbus);
	KASSERT(pci_bus_in_range(pr, busno));

	pb = PCICONF_RES_BUS(pr, busno);
	KASSERT(pb != NULL);
	bridge = pb->pb_bridge;

	KASSERT((busno == pr->pr_startbus) == (bridge == NULL));

	if (bridge == NULL) {
		/* Use resources provided by firmware. */
		PCI_RANGE_FOREACH(prtype) {
			pb->pb_res[prtype] = pr->pr_ranges[prtype];
			pr->pr_ranges[prtype] = NULL;
		}
	} else {
		/*
		 * Using the resources configured into the bridge by
		 * firmware, claim the resources on the parent bus and
		 * create a new vmem arena for the secondary bus.
		 */
		KASSERT(bridge->pd_bus != NULL);
		parent_bus = bridge->pd_bus;
		PCI_RANGE_FOREACH(prtype) {
			struct pci_resource_range *range;

			if (parent_bus->pb_res[prtype] == NULL ||
			    bridge->pd_bridge.ranges[prtype] == NULL) {
				continue;
			}
			SIMPLEQ_FOREACH(range,
			    &bridge->pd_bridge.ranges[prtype]->list,
			    entry) {
				error = pci_resource_claim(
				    parent_bus->pb_res[prtype],
				    range->start, range->end);
				if (error) {
					DPRINT("PCI: " PCI_SBDF_FMT
					    " bridge (bus %u)"
					    " %-4s %#" PRIx64 "-%#" PRIx64
					    " invalid\n",
					    PCI_SBDF_FMT_ARGS(pr, bridge),
					    busno,
					    pci_resource_typename(prtype),
					    range->start, range->end);
					continue;
				}
				pci_resource_arena_add_range(
				    pb->pb_res, prtype,
				    range->start, range->end);
				KASSERT(pb->pb_res[prtype] != NULL);
			}
		}
	}

	for (devno = 0; devno <= pb->pb_lastdevno; devno++) {
		KASSERT(devno < PCI_MAX_DEVICE);
		nfunc = pci_resource_device_functions(pr, busno, devno);
		for (funcno = 0; funcno < nfunc; funcno++) {
			pd = PCICONF_BUS_DEVICE(pb, devno, funcno);
			if (!pd->pd_present) {
				continue;
			}
			if (pd->pd_ppb) {
				uint8_t sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(
				    pd->pd_bridge.bridge_bus);
				KASSERT(pci_bus_in_range(pr, sec_bus));
				pci_resource_init_bus(pr, sec_bus);
			}
			pci_resource_init_device(pr, pd);
		}
	}
}

/*
 * pci_resource_probe --
 *
 *	Scan for PCI devices and initialize the resource manager.
 */
static void
pci_resource_probe(struct pci_resources *pr,
    const struct pci_resource_info *info)
{
	struct pci_resource_arena *busarena = info->ranges[PCI_RANGE_BUS];
	uint8_t startbus = SIMPLEQ_FIRST(&busarena->list)->start;
	uint8_t endbus = SIMPLEQ_LAST(&busarena->list, pci_resource_range,
	    entry)->end;
	u_int nbus;

	KASSERT(startbus <= endbus);
	KASSERT(pr->pr_bus == NULL);

	nbus = endbus - startbus + 1;

	pr->pr_pc = info->pc;
	pr->pr_startbus = startbus;
	pr->pr_busranges = busarena;
	pr->pr_bus = kmem_zalloc(nbus * sizeof(pr->pr_bus[0]), KM_SLEEP);
	memcpy(pr->pr_ranges, info->ranges, sizeof(pr->pr_ranges));

	/* Scan devices */
	pci_resource_scan_bus(pr, NULL, pr->pr_startbus);

	/*
	 * Create per-bus resource pools and remove ranges that are already
	 * in use by devices and downstream bridges.
	 */
	pci_resource_init_bus(pr, pr->pr_startbus);
}

/*
 * pci_resource_alloc_device --
 *
 *	Attempt to allocate resources for a given device.
 */
static void
pci_resource_alloc_device(struct pci_resources *pr, struct pci_device *pd)
{
	struct pci_iores *pi;
	struct pci_resource_arena *arena;
	pcireg_t cmd, ocmd, base;
	uint64_t addr;
	u_int enabled;
	u_int res;
	u_int align;
	int error;

	enabled = 0;
	ocmd = cmd = pci_conf_read(pr->pr_pc, pd->pd_tag,
	    PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_MEM_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_MEM);
	}
	if ((cmd & PCI_COMMAND_IO_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_IO);
	}

	for (res = 0; res < pd->pd_niores; res++) {
		pi = &pd->pd_iores[res];

		if ((enabled & __BIT(pi->pi_type)) != 0) {
			continue;
		}

		if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
			arena = pd->pd_bus->pb_res[PCI_RANGE_IO];
			align = uimax(pi->pi_size, 4);
		} else {
			KASSERT(pi->pi_type == PCI_MAPREG_TYPE_MEM);
			arena = NULL;
			align = uimax(pi->pi_size, 16);
			if (pi->pi_mem.prefetch) {
				arena = pd->pd_bus->pb_res[PCI_RANGE_PMEM];
			}
			if (arena == NULL) {
				arena = pd->pd_bus->pb_res[PCI_RANGE_MEM];
			}
		}
		if (arena == NULL) {
			DPRINT("PCI: " PCI_SBDF_FMT " BAR%u failed to"
			    " allocate %#" PRIx64 " bytes (no arena)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pi->pi_bar, pi->pi_size);
			return;
		}
		error = pci_resource_alloc(arena, pi->pi_size, align, &addr);
		if (error != 0) {
			DPRINT("PCI: " PCI_SBDF_FMT " BAR%u failed to"
			    " allocate %#" PRIx64 " bytes (no space)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pi->pi_bar, pi->pi_size);
			return;
		}
		DPRINT("PCI: " PCI_SBDF_FMT " BAR%u assigned range"
		    " %#" PRIx64 "-%#" PRIx64 "\n",
		    PCI_SBDF_FMT_ARGS(pr, pd),
		    pi->pi_bar, addr, addr + pi->pi_size - 1);

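		/*
		 * Program the BAR with the newly allocated address: the low
		 * 32 bits first and, for a 64-bit memory BAR, the high 32
		 * bits in the following BAR register.  Decoding is enabled
		 * afterwards via the command register, once all BARs have
		 * been programmed.
		 */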
		if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
			cmd |= PCI_COMMAND_IO_ENABLE;
			pci_conf_write(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar),
			    PCI_MAPREG_IO_ADDR(addr) | PCI_MAPREG_TYPE_IO);
		} else {
			cmd |= PCI_COMMAND_MEM_ENABLE;
			base = pci_conf_read(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar));
			base = PCI_MAPREG_MEM_ADDR(addr) |
			    PCI_MAPREG_MEM_TYPE(base);
			pci_conf_write(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar), base);
			if (pi->pi_mem.memtype == PCI_MAPREG_MEM_TYPE_64BIT) {
				base = (pcireg_t)
				    (PCI_MAPREG_MEM64_ADDR(addr) >> 32);
				pci_conf_write(pr->pr_pc, pd->pd_tag,
				    PCI_BAR(pi->pi_bar + 1), base);
			}
		}
	}

	if (ocmd != cmd) {
		pci_conf_write(pr->pr_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}
}

/*
 * pci_resource_alloc_bus --
 *
 *	Attempt to assign resources to all devices on a given bus, recursively.
 */
static void
pci_resource_alloc_bus(struct pci_resources *pr, uint8_t busno)
{
	struct pci_bus *pb = PCICONF_RES_BUS(pr, busno);
	struct pci_device *pd;
	uint8_t devno, funcno;

	for (devno = 0; devno <= pb->pb_lastdevno; devno++) {
		for (funcno = 0; funcno < PCI_MAX_FUNC; funcno++) {
			pd = PCICONF_BUS_DEVICE(pb, devno, funcno);
			if (!pd->pd_present) {
				if (funcno == 0) {
					break;
				}
				continue;
			}
			if (!pd->pd_configured) {
				pci_resource_alloc_device(pr, pd);
			}
			if (pd->pd_ppb) {
				uint8_t sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(
				    pd->pd_bridge.bridge_bus);
				pci_resource_alloc_bus(pr, sec_bus);
			}
		}
	}
}

/*
 * pci_resource_init --
 *
 *	Public interface to the PCI resource manager.  Scans for available
 *	devices and assigns resources.
 */
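/*
 * A minimal usage sketch (the address ranges below are hypothetical and
 * would normally be derived from firmware tables such as ACPI or a device
 * tree "ranges" property):
 *
 *	struct pci_resource_info info = { .pc = pc };
 *
 *	pci_resource_add_range(&info, PCI_RANGE_BUS, 0, 255);
 *	pci_resource_add_range(&info, PCI_RANGE_IO, 0x1000, 0xffff);
 *	pci_resource_add_range(&info, PCI_RANGE_MEM,
 *	    0x40000000, 0x7fffffff);
 *	pci_resource_add_range(&info, PCI_RANGE_PMEM,
 *	    0x600000000, 0x6ffffffff);
 *	pci_resource_init(&info);
 */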
void
pci_resource_init(const struct pci_resource_info *info)
{
	struct pci_resources pr = {};

	if (info->ranges[PCI_RANGE_BUS] == NULL) {
		aprint_error("PCI: no buses\n");
		return;
	}
	KASSERT(!SIMPLEQ_EMPTY(&info->ranges[PCI_RANGE_BUS]->list));
	pci_resource_probe(&pr, info);
	pci_resource_alloc_bus(&pr, pr.pr_startbus);
}

/*
 * pci_resource_typename --
 *
 *	Return a string description of a PCI range type.
 */
const char *
pci_resource_typename(enum pci_range_type prtype)
{
	KASSERT(prtype < NUM_PCI_RANGES);
	return pci_range_typenames[prtype];
}