/* $NetBSD: pci_resource.c,v 1.1 2022/10/14 22:10:15 jmcneill Exp $ */

/*-
 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * pci_resource.c --
 *
 *	Scan current PCI resource allocations and attempt to assign resources
 *	to devices that are not configured, WITHOUT changing any configuration
 *	performed by system firmware.
 */
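
/*
 * Overview of the code below: pci_resource_init() is the public entry
 * point.  It calls pci_resource_probe(), which enumerates buses and
 * devices (pci_resource_scan_bus() / pci_resource_scan_device()) and then
 * records the firmware-assigned resources in per-bus vmem arenas
 * (pci_resource_init_bus() / pci_resource_init_device()).  Finally,
 * pci_resource_alloc_bus() walks the hierarchy again and tries to assign
 * resources to any device that firmware left unconfigured.
 */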

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci_resource.c,v 1.1 2022/10/14 22:10:15 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/vmem.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pci_resource.h>

#define	DPRINT	aprint_debug

#if defined(PCI_RESOURCE_TEST_VENDOR_ID) && \
    defined(PCI_RESOURCE_TEST_PRODUCT_ID)
#define	IS_TEST_DEVICE(_pd)	\
	(PCI_VENDOR((_pd)->pd_id) == PCI_RESOURCE_TEST_VENDOR_ID && \
	 PCI_PRODUCT((_pd)->pd_id) == PCI_RESOURCE_TEST_PRODUCT_ID)
#else
#define	IS_TEST_DEVICE(_pd)	0
#endif

#define	PCI_MAX_DEVICE	32
#define	PCI_MAX_FUNC	8

#define	PCI_MAX_IORES	6

#define	PCI_RANGE_FOREACH(_type)	\
	for (u_int _type = PCI_RANGE_BUS; _type < NUM_PCI_RANGES; _type++)

static const char *pci_range_typenames[NUM_PCI_RANGES] = {
	[PCI_RANGE_BUS]  = "bus",
	[PCI_RANGE_IO]   = "io",
	[PCI_RANGE_MEM]  = "mem",
	[PCI_RANGE_PMEM] = "pmem",
};

struct pci_bus;

struct pci_iores {
	uint64_t	pi_base;	/* Base address */
	uint64_t	pi_size;	/* Resource size */
	uint8_t		pi_type;	/* PCI_MAPREG_TYPE_* */
	u_int		pi_bar;		/* PCI bar number */
	union {
		struct {
			uint8_t	memtype;
			bool	prefetch;
		} pi_mem;
	};
};

struct pci_device {
	bool		pd_present;	/* Device is present */
	bool		pd_configured;	/* Device is configured */
	struct pci_bus	*pd_bus;	/* Parent bus */
	uint8_t		pd_devno;	/* Device number */
	uint8_t		pd_funcno;	/* Function number */
	pcitag_t	pd_tag;		/* PCI tag */

	pcireg_t	pd_id;		/* Vendor ID, Device ID */
	pcireg_t	pd_class;	/* Revision ID, Class Code */
	pcireg_t	pd_bhlc;	/* BIST, Header Type, Primary Latency
					 * Timer, Cache Line Size */

	struct pci_iores pd_iores[PCI_MAX_IORES];
	u_int		pd_niores;

	bool		pd_ppb;		/* PCI-PCI bridge */
	union {
		struct {
			pcireg_t bridge_bus;
			struct pci_resource_range ranges[NUM_PCI_RANGES];
		} pd_bridge;
	};
};

struct pci_bus {
	uint8_t		pb_busno;	/* Bus number */
	struct pci_device *pb_bridge;	/* Parent bridge, or NULL */

	struct pci_device pb_device[PCI_MAX_DEVICE * PCI_MAX_FUNC];
					/* Devices on bus */
	u_int		pb_lastdevno;	/* Last device found */

	struct pci_resource_range pb_ranges[NUM_PCI_RANGES];
	vmem_t		*pb_res[NUM_PCI_RANGES];
};

struct pci_resources {
	struct pci_bus	**pr_bus;	/* Bus list */
	pci_chipset_tag_t pr_pc;	/* Chipset tag */
	uint8_t		pr_startbus;	/* First bus number */
	uint8_t		pr_endbus;	/* Last bus number */

	struct pci_resource_range pr_ranges[NUM_PCI_RANGES];
	vmem_t		*pr_res[NUM_PCI_RANGES];
};

static void pci_resource_scan_bus(struct pci_resources *,
    struct pci_device *, uint8_t);

#define	PCI_SBDF_FMT		"%04x:%02x:%02x.%u"
#define	PCI_SBDF_FMT_ARGS(_pr, _pd)	\
	pci_get_segment((_pr)->pr_pc),	\
	(_pd)->pd_bus->pb_busno,	\
	(_pd)->pd_devno,		\
	(_pd)->pd_funcno

#define	PCICONF_RES_BUS(_pr, _busno)	\
	((_pr)->pr_bus[(_busno) - (_pr)->pr_startbus])
#define	PCICONF_BUS_DEVICE(_pb, _devno, _funcno)	\
	(&(_pb)->pb_device[(_devno) * PCI_MAX_FUNC + (_funcno)])

/*
 * pci_create_vmem --
 *
 *	Create a vmem arena covering the specified range, used for tracking
 *	PCI resources.
 */
static vmem_t *
pci_create_vmem(const char *name, bus_addr_t start, bus_addr_t end)
{
	vmem_t *arena;

	arena = vmem_create(name, 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
	    IPL_NONE);
	if (arena == NULL) {
		return NULL;
	}

	if (vmem_add(arena, start, end - start + 1, VM_SLEEP) != 0) {
		vmem_destroy(arena);
		arena = NULL;
	}

	return arena;
}

/*
 * pci_new_bus --
 *
 *	Create a new PCI bus and initialize its resource ranges.
 */
static struct pci_bus *
pci_new_bus(struct pci_resources *pr, uint8_t busno, struct pci_device *bridge)
{
	struct pci_bus *pb;
	struct pci_resource_range *ranges;

	pb = kmem_zalloc(sizeof(*pb), KM_SLEEP);
	pb->pb_busno = busno;
	pb->pb_bridge = bridge;
	if (bridge == NULL) {
		/*
		 * No additional constraints on resource allocations for
		 * the root bus.
		 */
		ranges = pr->pr_ranges;
	} else {
		/*
		 * Resource allocations for this bus are constrained by the
		 * bridge forwarding settings.
		 */
		ranges = bridge->pd_bridge.ranges;
	}
	memcpy(pb->pb_ranges, ranges, sizeof(pb->pb_ranges));

	return pb;
}

/*
 * pci_resource_device_functions --
 *
 *	Returns the number of PCI functions for a given bus and device.
 */
static uint8_t
pci_resource_device_functions(struct pci_resources *pr,
    uint8_t busno, uint8_t devno)
{
	struct pci_bus *pb;
	struct pci_device *pd;

	pb = PCICONF_RES_BUS(pr, busno);
	pd = PCICONF_BUS_DEVICE(pb, devno, 0);
	if (!pd->pd_present) {
		return 0;
	}

	return PCI_HDRTYPE_MULTIFN(pd->pd_bhlc) ? 8 : 1;
}

/*
 * pci_resource_device_print --
 *
 *	Log details about a device.
 */
static void
pci_resource_device_print(struct pci_resources *pr,
    struct pci_device *pd)
{
	struct pci_iores *pi;
	u_int res;

	DPRINT("PCI: " PCI_SBDF_FMT " %04x:%04x %02x 0x%06x",
	    PCI_SBDF_FMT_ARGS(pr, pd),
	    PCI_VENDOR(pd->pd_id), PCI_PRODUCT(pd->pd_id),
	    PCI_REVISION(pd->pd_class), (pd->pd_class >> 8) & 0xffffff);

	switch (PCI_HDRTYPE_TYPE(pd->pd_bhlc)) {
	case PCI_HDRTYPE_DEVICE:
		DPRINT(" (device)\n");
		break;
	case PCI_HDRTYPE_PPB:
		DPRINT(" (bridge %u -> %u-%u)\n",
		    PCI_BRIDGE_BUS_NUM_PRIMARY(pd->pd_bridge.bridge_bus),
		    PCI_BRIDGE_BUS_NUM_SECONDARY(pd->pd_bridge.bridge_bus),
		    PCI_BRIDGE_BUS_NUM_SUBORDINATE(pd->pd_bridge.bridge_bus));

		if (pd->pd_bridge.ranges[PCI_RANGE_IO].end) {
			DPRINT("PCI: " PCI_SBDF_FMT
			    " [bridge] window io %#" PRIx64 "-%#" PRIx64
			    "\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pd->pd_bridge.ranges[PCI_RANGE_IO].start,
			    pd->pd_bridge.ranges[PCI_RANGE_IO].end);
		}
		if (pd->pd_bridge.ranges[PCI_RANGE_MEM].end) {
			DPRINT("PCI: " PCI_SBDF_FMT
			    " [bridge] window mem %#" PRIx64 "-%#" PRIx64
			    " (non-prefetchable)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pd->pd_bridge.ranges[PCI_RANGE_MEM].start,
			    pd->pd_bridge.ranges[PCI_RANGE_MEM].end);
		}
		if (pd->pd_bridge.ranges[PCI_RANGE_PMEM].end) {
			DPRINT("PCI: " PCI_SBDF_FMT
			    " [bridge] window mem %#" PRIx64 "-%#" PRIx64
			    " (prefetchable)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pd->pd_bridge.ranges[PCI_RANGE_PMEM].start,
			    pd->pd_bridge.ranges[PCI_RANGE_PMEM].end);
		}

		break;
	default:
		DPRINT(" (0x%02x)\n", PCI_HDRTYPE_TYPE(pd->pd_bhlc));
	}

	for (res = 0; res < pd->pd_niores; res++) {
		pi = &pd->pd_iores[res];

		DPRINT("PCI: " PCI_SBDF_FMT
		    " [device] resource BAR%u: %s @ %#" PRIx64 " size %#"
		    PRIx64,
		    PCI_SBDF_FMT_ARGS(pr, pd), pi->pi_bar,
		    pi->pi_type == PCI_MAPREG_TYPE_MEM ? "mem" : "io ",
		    pi->pi_base, pi->pi_size);

		if (pi->pi_type == PCI_MAPREG_TYPE_MEM) {
			switch (pi->pi_mem.memtype) {
			case PCI_MAPREG_MEM_TYPE_32BIT:
				DPRINT(", 32-bit");
				break;
			case PCI_MAPREG_MEM_TYPE_32BIT_1M:
				DPRINT(", 32-bit (1M)");
				break;
			case PCI_MAPREG_MEM_TYPE_64BIT:
				DPRINT(", 64-bit");
				break;
			}
			DPRINT(" %sprefetchable",
			    pi->pi_mem.prefetch ? "" : "non-");
		}
		DPRINT("\n");
	}
}

/*
 * pci_resource_scan_bar --
 *
 *	Determine the current BAR configuration for a given device.
 */
static void
pci_resource_scan_bar(struct pci_resources *pr,
    struct pci_device *pd, pcireg_t mapreg_start, pcireg_t mapreg_end,
    bool is_ppb)
{
	pci_chipset_tag_t pc = pr->pr_pc;
	pcitag_t tag = pd->pd_tag;
	pcireg_t mapreg = mapreg_start;
	pcireg_t ocmd, cmd, bar[2], mask[2];
	uint64_t addr, size;
	struct pci_iores *pi;

	if (!is_ppb) {
		ocmd = cmd = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
		cmd &= ~(PCI_COMMAND_MASTER_ENABLE |
		    PCI_COMMAND_MEM_ENABLE |
		    PCI_COMMAND_IO_ENABLE);
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, cmd);
	}

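	/*
	 * Standard BAR sizing: save the BAR, write all-ones, read back the
	 * mask to derive the size and type, then restore the original
	 * value.  Memory and I/O decoding are disabled above (for
	 * non-bridge functions) so the device does not respond at a bogus
	 * address while the BAR briefly holds the all-ones pattern.  A
	 * 64-bit memory BAR occupies two consecutive 32-bit registers.
	 */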
	while (mapreg < mapreg_end) {
		u_int width = 4;

		bar[0] = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, 0xffffffff);
		mask[0] = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, bar[0]);

		switch (PCI_MAPREG_TYPE(mask[0])) {
		case PCI_MAPREG_TYPE_MEM:
			switch (PCI_MAPREG_MEM_TYPE(mask[0])) {
			case PCI_MAPREG_MEM_TYPE_32BIT:
			case PCI_MAPREG_MEM_TYPE_32BIT_1M:
				size = PCI_MAPREG_MEM_SIZE(mask[0]);
				addr = PCI_MAPREG_MEM_ADDR(bar[0]);
				break;
			case PCI_MAPREG_MEM_TYPE_64BIT:
				bar[1] = pci_conf_read(pc, tag, mapreg + 4);
				pci_conf_write(pc, tag, mapreg + 4, 0xffffffff);
				mask[1] = pci_conf_read(pc, tag, mapreg + 4);
				pci_conf_write(pc, tag, mapreg + 4, bar[1]);

				size = PCI_MAPREG_MEM64_SIZE(
				    ((uint64_t)mask[1] << 32) | mask[0]);
				addr = PCI_MAPREG_MEM64_ADDR(
				    ((uint64_t)bar[1] << 32) | bar[0]);
				width = 8;
				break;
			default:
				size = 0;
			}
			if (size > 0) {
				pi = &pd->pd_iores[pd->pd_niores++];
				pi->pi_type = PCI_MAPREG_TYPE_MEM;
				pi->pi_base = addr;
				pi->pi_size = size;
				pi->pi_bar = (mapreg - mapreg_start) / 4;
				pi->pi_mem.memtype =
				    PCI_MAPREG_MEM_TYPE(mask[0]);
				pi->pi_mem.prefetch =
				    PCI_MAPREG_MEM_PREFETCHABLE(mask[0]);
			}
			break;
		case PCI_MAPREG_TYPE_IO:
			size = PCI_MAPREG_IO_SIZE(mask[0] | 0xffff0000);
			addr = PCI_MAPREG_IO_ADDR(bar[0]);
			if (size > 0) {
				pi = &pd->pd_iores[pd->pd_niores++];
				pi->pi_type = PCI_MAPREG_TYPE_IO;
				pi->pi_base = addr;
				pi->pi_size = size;
				pi->pi_bar = (mapreg - mapreg_start) / 4;
			}
			break;
		}

		KASSERT(pd->pd_niores <= PCI_MAX_IORES);

		mapreg += width;
	}

	if (!is_ppb) {
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, ocmd);
	}
}

/*
 * pci_resource_scan_bridge --
 *
 *	Determine the current configuration of a PCI-PCI bridge.
 */
static void
pci_resource_scan_bridge(struct pci_resources *pr,
    struct pci_device *pd)
{
	pci_chipset_tag_t pc = pr->pr_pc;
	pcitag_t tag = pd->pd_tag;
	pcireg_t res, reshigh;

	pd->pd_ppb = true;

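	/*
	 * Read the bus numbers and the I/O, memory, and prefetchable
	 * memory windows that firmware programmed into the bridge.  A
	 * window whose base is not strictly below its limit is treated as
	 * disabled and recorded as 0-0 so later code can ignore it.
	 */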
	res = pci_conf_read(pc, tag, PCI_BRIDGE_BUS_REG);
	pd->pd_bridge.bridge_bus = res;
	pd->pd_bridge.ranges[PCI_RANGE_BUS].start =
	    PCI_BRIDGE_BUS_NUM_SECONDARY(res);
	pd->pd_bridge.ranges[PCI_RANGE_BUS].end =
	    PCI_BRIDGE_BUS_NUM_SUBORDINATE(res);

	res = pci_conf_read(pc, tag, PCI_BRIDGE_STATIO_REG);
	pd->pd_bridge.ranges[PCI_RANGE_IO].start =
	    PCI_BRIDGE_STATIO_IOBASE_ADDR(res);
	pd->pd_bridge.ranges[PCI_RANGE_IO].end =
	    PCI_BRIDGE_STATIO_IOLIMIT_ADDR(res);
	if (PCI_BRIDGE_IO_32BITS(res)) {
		reshigh = pci_conf_read(pc, tag, PCI_BRIDGE_IOHIGH_REG);
		pd->pd_bridge.ranges[PCI_RANGE_IO].start |=
		    __SHIFTOUT(reshigh, PCI_BRIDGE_IOHIGH_BASE) << 16;
		pd->pd_bridge.ranges[PCI_RANGE_IO].end |=
		    __SHIFTOUT(reshigh, PCI_BRIDGE_IOHIGH_LIMIT) << 16;
	}
	if (pd->pd_bridge.ranges[PCI_RANGE_IO].start >=
	    pd->pd_bridge.ranges[PCI_RANGE_IO].end) {
		pd->pd_bridge.ranges[PCI_RANGE_IO].start = 0;
		pd->pd_bridge.ranges[PCI_RANGE_IO].end = 0;
	}

	res = pci_conf_read(pc, tag, PCI_BRIDGE_MEMORY_REG);
	pd->pd_bridge.ranges[PCI_RANGE_MEM].start =
	    PCI_BRIDGE_MEMORY_BASE_ADDR(res);
	pd->pd_bridge.ranges[PCI_RANGE_MEM].end =
	    PCI_BRIDGE_MEMORY_LIMIT_ADDR(res);
	if (pd->pd_bridge.ranges[PCI_RANGE_MEM].start >=
	    pd->pd_bridge.ranges[PCI_RANGE_MEM].end) {
		pd->pd_bridge.ranges[PCI_RANGE_MEM].start = 0;
		pd->pd_bridge.ranges[PCI_RANGE_MEM].end = 0;
	}

	res = pci_conf_read(pc, tag, PCI_BRIDGE_PREFETCHMEM_REG);
	pd->pd_bridge.ranges[PCI_RANGE_PMEM].start =
	    PCI_BRIDGE_PREFETCHMEM_BASE_ADDR(res);
	pd->pd_bridge.ranges[PCI_RANGE_PMEM].end =
	    PCI_BRIDGE_PREFETCHMEM_LIMIT_ADDR(res);
	if (PCI_BRIDGE_PREFETCHMEM_64BITS(res)) {
		reshigh = pci_conf_read(pc, tag,
		    PCI_BRIDGE_PREFETCHBASEUP32_REG);
		pd->pd_bridge.ranges[PCI_RANGE_PMEM].start |=
		    (uint64_t)reshigh << 32;
		reshigh = pci_conf_read(pc, tag,
		    PCI_BRIDGE_PREFETCHLIMITUP32_REG);
		pd->pd_bridge.ranges[PCI_RANGE_PMEM].end |=
		    (uint64_t)reshigh << 32;
	}
	if (pd->pd_bridge.ranges[PCI_RANGE_PMEM].start >=
	    pd->pd_bridge.ranges[PCI_RANGE_PMEM].end) {
		pd->pd_bridge.ranges[PCI_RANGE_PMEM].start = 0;
		pd->pd_bridge.ranges[PCI_RANGE_PMEM].end = 0;
	}
}

/*
 * pci_resource_scan_device --
 *
 *	Determine the current configuration of a PCI device.
 */
static bool
pci_resource_scan_device(struct pci_resources *pr,
    struct pci_bus *parent_bus, uint8_t devno, uint8_t funcno)
{
	struct pci_device *pd;
	pcitag_t tag;
	pcireg_t id, bridge_bus;
	uint8_t sec_bus;

	tag = pci_make_tag(pr->pr_pc, parent_bus->pb_busno, devno, funcno);
	id = pci_conf_read(pr->pr_pc, tag, PCI_ID_REG);
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) {
		return false;
	}

	pd = PCICONF_BUS_DEVICE(parent_bus, devno, funcno);
	pd->pd_present = true;
	pd->pd_bus = parent_bus;
	pd->pd_tag = tag;
	pd->pd_devno = devno;
	pd->pd_funcno = funcno;
	pd->pd_id = id;
	pd->pd_class = pci_conf_read(pr->pr_pc, tag, PCI_CLASS_REG);
	pd->pd_bhlc = pci_conf_read(pr->pr_pc, tag, PCI_BHLC_REG);

	switch (PCI_HDRTYPE_TYPE(pd->pd_bhlc)) {
	case PCI_HDRTYPE_DEVICE:
		pci_resource_scan_bar(pr, pd, PCI_MAPREG_START,
		    PCI_MAPREG_END, false);
		break;
	case PCI_HDRTYPE_PPB:
		pci_resource_scan_bar(pr, pd, PCI_MAPREG_START,
		    PCI_MAPREG_PPB_END, true);
		pci_resource_scan_bridge(pr, pd);
		break;
	}

	pci_resource_device_print(pr, pd);

	if (PCI_HDRTYPE_TYPE(pd->pd_bhlc) == PCI_HDRTYPE_PPB &&
	    PCI_CLASS(pd->pd_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pd->pd_class) == PCI_SUBCLASS_BRIDGE_PCI) {
		bridge_bus = pci_conf_read(pr->pr_pc, tag, PCI_BRIDGE_BUS_REG);
		sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(bridge_bus);
		if (sec_bus <= pr->pr_endbus) {
			pci_resource_scan_bus(pr, pd, sec_bus);
		}
	}

	return true;
}

/*
 * pci_resource_scan_bus --
 *
 *	Enumerate devices on a bus, recursively.
 */
static void
pci_resource_scan_bus(struct pci_resources *pr,
    struct pci_device *bridge_dev, uint8_t busno)
{
	struct pci_bus *pb;
	uint8_t devno, funcno;
	uint8_t nfunc;

	KASSERT(busno >= pr->pr_startbus);
	KASSERT(busno <= pr->pr_endbus);

	if (PCICONF_RES_BUS(pr, busno) != NULL) {
		/*
		 * Firmware has configured more than one bridge with the
		 * same secondary bus number.
		 */
		panic("Bus %u already scanned (firmware bug!)", busno);
		return;
	}

	pb = pci_new_bus(pr, busno, bridge_dev);
	PCICONF_RES_BUS(pr, busno) = pb;

	for (devno = 0; devno < PCI_MAX_DEVICE; devno++) {
		if (!pci_resource_scan_device(pr, pb, devno, 0)) {
			continue;
		}
		pb->pb_lastdevno = devno;

		nfunc = pci_resource_device_functions(pr, busno, devno);
		for (funcno = 1; funcno < nfunc; funcno++) {
			pci_resource_scan_device(pr, pb, devno, funcno);
		}
	}
}

/*
 * pci_resource_claim --
 *
 *	Claim a resource from a vmem arena. This is called to inform the
 *	resource manager about resources already configured by system firmware.
 */
static int
pci_resource_claim(vmem_t *arena, vmem_addr_t start, vmem_addr_t end)
{
	KASSERT(end >= start);

	return vmem_xalloc(arena, end - start + 1, 0, 0, 0, start, end,
	    VM_BESTFIT | VM_NOSLEEP, NULL);
}

/*
 * pci_resource_alloc --
 *
 *	Allocate a resource from a vmem arena. This is called when configuring
 *	devices that were not already configured by system firmware.
 */
static int
pci_resource_alloc(vmem_t *arena, vmem_size_t size, vmem_size_t align,
    uint64_t *base)
{
	vmem_addr_t addr;
	int error;

	KASSERT(size != 0);

	error = vmem_xalloc(arena, size, align, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, VM_BESTFIT | VM_NOSLEEP, &addr);
	if (error == 0) {
		*base = (uint64_t)addr;
	}

	return error;
}

/*
 * pci_resource_init_device --
 *
 *	Discover resources assigned by system firmware, notify the resource
 *	manager of these ranges, and determine if the device has additional
 *	resources that need to be allocated.
 */
static void
pci_resource_init_device(struct pci_resources *pr,
    struct pci_device *pd)
{
	struct pci_iores *pi;
	struct pci_bus *pb = pd->pd_bus;
	vmem_t *res_io = pb->pb_res[PCI_RANGE_IO];
	vmem_t *res_mem = pb->pb_res[PCI_RANGE_MEM];
	vmem_t *res_pmem = pb->pb_res[PCI_RANGE_PMEM];
	pcireg_t cmd;
	u_int enabled, required;
	u_int iores;
	int error;

	KASSERT(pd->pd_present);

	if (IS_TEST_DEVICE(pd)) {
		cmd = pci_conf_read(pr->pr_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG);
		cmd &= ~(PCI_COMMAND_MEM_ENABLE|PCI_COMMAND_IO_ENABLE|
		    PCI_COMMAND_MASTER_ENABLE);
		pci_conf_write(pr->pr_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG,
		    cmd);
	}

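	/*
	 * Record which resource types (memory, I/O) the device implements
	 * and which of those firmware has enabled in the command register.
	 * Each enabled BAR is claimed from the matching per-bus arena; the
	 * device counts as configured only if every resource type it
	 * requires is enabled.
	 */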
	enabled = required = 0;
	cmd = pci_conf_read(pr->pr_pc, pd->pd_tag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_MEM_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_MEM);
	}
	if ((cmd & PCI_COMMAND_IO_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_IO);
	}

	for (iores = 0; iores < pd->pd_niores; iores++) {
		pi = &pd->pd_iores[iores];

		required |= __BIT(pi->pi_type);

		if (IS_TEST_DEVICE(pd)) {
			pci_conf_write(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar), 0);
			continue;
		}
		if ((enabled & __BIT(pi->pi_type)) == 0) {
			continue;
		}

		if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
			error = res_io == NULL ? ERANGE :
			    pci_resource_claim(res_io, pi->pi_base,
				pi->pi_base + pi->pi_size - 1);
			if (error) {
				DPRINT("PCI: " PCI_SBDF_FMT " [device] io "
				    " %#" PRIx64 "-%#" PRIx64
				    " invalid (%d)\n",
				    PCI_SBDF_FMT_ARGS(pr, pd),
				    pi->pi_base,
				    pi->pi_base + pi->pi_size - 1,
				    error);
			}
			continue;
		}

		KASSERT(pi->pi_type == PCI_MAPREG_TYPE_MEM);
		error = ERANGE;
		if (pi->pi_mem.prefetch && res_pmem != NULL) {
			error = pci_resource_claim(res_pmem, pi->pi_base,
			    pi->pi_base + pi->pi_size - 1);
		}
		if (error && res_mem != NULL) {
			error = pci_resource_claim(res_mem, pi->pi_base,
			    pi->pi_base + pi->pi_size - 1);
		}
		if (error) {
			DPRINT("PCI: " PCI_SBDF_FMT " [device] mem"
			    " (%sprefetchable)"
			    " %#" PRIx64 "-%#" PRIx64
			    " invalid (%d)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pi->pi_mem.prefetch ? "" : "non-",
			    pi->pi_base,
			    pi->pi_base + pi->pi_size - 1,
			    error);
		}
	}

	pd->pd_configured = (enabled & required) == required;

	if (!pd->pd_configured) {
		DPRINT("PCI: " PCI_SBDF_FMT " [device] "
		    "not configured by firmware\n",
		    PCI_SBDF_FMT_ARGS(pr, pd));
	}
}

/*
 * pci_resource_init_bus --
 *
 *	Discover resources in use on a given bus, recursively.
 */
static void
pci_resource_init_bus(struct pci_resources *pr, uint8_t busno)
{
	struct pci_bus *pb, *parent_bus;
	struct pci_device *pd, *bridge;
	uint8_t devno, funcno;
	uint8_t nfunc;
	int error;

	KASSERT(busno >= pr->pr_startbus);
	KASSERT(busno <= pr->pr_endbus);

	pb = PCICONF_RES_BUS(pr, busno);
	KASSERT(pb != NULL);
	bridge = pb->pb_bridge;

	KASSERT((busno == pr->pr_startbus) == (bridge == NULL));

	if (bridge == NULL) {
		/* Use resources provided by firmware. */
		PCI_RANGE_FOREACH(prtype) {
			pb->pb_res[prtype] = pr->pr_res[prtype];
			pr->pr_res[prtype] = NULL;
		}
	} else {
		/*
		 * Using the resources configured into the bridge by
		 * firmware, claim the resources on the parent bus and
		 * create a new vmem arena for the secondary bus.
		 */
		KASSERT(bridge->pd_bus != NULL);
		parent_bus = bridge->pd_bus;
		PCI_RANGE_FOREACH(prtype) {
			if (parent_bus->pb_res[prtype] == NULL ||
			    !bridge->pd_bridge.ranges[prtype].end) {
				continue;
			}
			error = pci_resource_claim(
			    parent_bus->pb_res[prtype],
			    bridge->pd_bridge.ranges[prtype].start,
			    bridge->pd_bridge.ranges[prtype].end);
			if (error == 0) {
				pb->pb_res[prtype] = pci_create_vmem(
				    pci_resource_typename(prtype),
				    bridge->pd_bridge.ranges[prtype].start,
				    bridge->pd_bridge.ranges[prtype].end);
				KASSERT(pb->pb_res[prtype] != NULL);
			} else {
				DPRINT("PCI: " PCI_SBDF_FMT " bridge (bus %u)"
				    " %-4s %#" PRIx64 "-%#" PRIx64
				    " invalid\n",
				    PCI_SBDF_FMT_ARGS(pr, bridge), busno,
				    pci_resource_typename(prtype),
				    bridge->pd_bridge.ranges[prtype].start,
				    bridge->pd_bridge.ranges[prtype].end);
			}
		}
	}

	for (devno = 0; devno <= pb->pb_lastdevno; devno++) {
		KASSERT(devno < PCI_MAX_DEVICE);
		nfunc = pci_resource_device_functions(pr, busno, devno);
		for (funcno = 0; funcno < nfunc; funcno++) {
			pd = PCICONF_BUS_DEVICE(pb, devno, funcno);
			if (!pd->pd_present) {
				continue;
			}
			if (pd->pd_ppb) {
				uint8_t sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(
				    pd->pd_bridge.bridge_bus);
				pci_resource_init_bus(pr, sec_bus);
			}
			pci_resource_init_device(pr, pd);
		}
	}
}

/*
 * pci_resource_probe --
 *
 *	Scan for PCI devices and initialize the resource manager.
 */
static void
pci_resource_probe(struct pci_resources *pr,
    const struct pci_resource_info *info)
{
	uint8_t startbus = (uint8_t)info->ranges[PCI_RANGE_BUS].start;
	uint8_t endbus = (uint8_t)info->ranges[PCI_RANGE_BUS].end;
	u_int nbus;

	KASSERT(startbus <= endbus);
	KASSERT(pr->pr_bus == NULL);

	nbus = endbus - startbus + 1;

	pr->pr_pc = info->pc;
	pr->pr_startbus = startbus;
	pr->pr_endbus = endbus;
	pr->pr_bus = kmem_zalloc(nbus * sizeof(struct pci_bus *), KM_SLEEP);
	memcpy(pr->pr_ranges, info->ranges, sizeof(pr->pr_ranges));
	PCI_RANGE_FOREACH(prtype) {
		if (prtype == PCI_RANGE_BUS || info->ranges[prtype].end) {
			pr->pr_res[prtype] = pci_create_vmem(
			    pci_resource_typename(prtype),
			    info->ranges[prtype].start,
			    info->ranges[prtype].end);
			KASSERT(pr->pr_res[prtype] != NULL);
		}
	}

	/* Scan devices */
	pci_resource_scan_bus(pr, NULL, pr->pr_startbus);

	/*
	 * Create per-bus resource pools and remove ranges that are already
	 * in use by devices and downstream bridges.
	 */
	pci_resource_init_bus(pr, pr->pr_startbus);
}

/*
 * pci_resource_alloc_device --
 *
 *	Attempt to allocate resources for a given device.
 */
static void
pci_resource_alloc_device(struct pci_resources *pr, struct pci_device *pd)
{
	struct pci_iores *pi;
	vmem_t *arena;
	pcireg_t cmd, ocmd, base;
	uint64_t addr;
	u_int enabled;
	u_int res;
	u_int align;
	int error;

	enabled = 0;
	ocmd = cmd = pci_conf_read(pr->pr_pc, pd->pd_tag,
	    PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_MEM_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_MEM);
	}
	if ((cmd & PCI_COMMAND_IO_ENABLE) != 0) {
		enabled |= __BIT(PCI_MAPREG_TYPE_IO);
	}

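	/*
	 * Walk the BARs discovered earlier.  Resource types whose decode is
	 * already enabled were claimed by pci_resource_init_device() and
	 * are left untouched; everything else is allocated from the
	 * appropriate per-bus arena, preferring the prefetchable arena for
	 * prefetchable memory BARs and falling back to non-prefetchable
	 * memory.
	 */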
	for (res = 0; res < pd->pd_niores; res++) {
		pi = &pd->pd_iores[res];

		if ((enabled & __BIT(pi->pi_type)) != 0) {
			continue;
		}

		if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
			arena = pd->pd_bus->pb_res[PCI_RANGE_IO];
			align = uimax(pi->pi_size, 4);
		} else {
			KASSERT(pi->pi_type == PCI_MAPREG_TYPE_MEM);
			arena = NULL;
			align = uimax(pi->pi_size, 16);
			if (pi->pi_mem.prefetch) {
				arena = pd->pd_bus->pb_res[PCI_RANGE_PMEM];
			}
			if (arena == NULL) {
				arena = pd->pd_bus->pb_res[PCI_RANGE_MEM];
			}
		}
		if (arena == NULL) {
			DPRINT("PCI: " PCI_SBDF_FMT " BAR%u failed to"
			    " allocate %#" PRIx64 " bytes (no arena)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pi->pi_bar, pi->pi_size);
			return;
		}
		error = pci_resource_alloc(arena, pi->pi_size, align, &addr);
		if (error != 0) {
			DPRINT("PCI: " PCI_SBDF_FMT " BAR%u failed to"
			    " allocate %#" PRIx64 " bytes (no space)\n",
			    PCI_SBDF_FMT_ARGS(pr, pd),
			    pi->pi_bar, pi->pi_size);
			return;
		}
		DPRINT("PCI: " PCI_SBDF_FMT " BAR%u assigned range"
911 " 0x%#" PRIx64 "-0x%#" PRIx64 "\n",
		    PCI_SBDF_FMT_ARGS(pr, pd),
		    pi->pi_bar, addr, addr + pi->pi_size - 1);

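		/*
		 * Program the new base address into the BAR (and into the
		 * following 32-bit register for a 64-bit memory BAR) and
		 * remember to enable the matching decode bit.  The command
		 * register is written back once, below, if it changed.
		 */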
		if (pi->pi_type == PCI_MAPREG_TYPE_IO) {
			cmd |= PCI_COMMAND_IO_ENABLE;
			pci_conf_write(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar),
			    PCI_MAPREG_IO_ADDR(addr) | PCI_MAPREG_TYPE_IO);
		} else {
			cmd |= PCI_COMMAND_MEM_ENABLE;
			base = pci_conf_read(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar));
			base = PCI_MAPREG_MEM_ADDR(addr) |
			    PCI_MAPREG_MEM_TYPE(base);
			pci_conf_write(pr->pr_pc, pd->pd_tag,
			    PCI_BAR(pi->pi_bar), base);
			if (pi->pi_mem.memtype == PCI_MAPREG_MEM_TYPE_64BIT) {
				base = (pcireg_t)
				    (PCI_MAPREG_MEM64_ADDR(addr) >> 32);
				pci_conf_write(pr->pr_pc, pd->pd_tag,
				    PCI_BAR(pi->pi_bar + 1), base);
			}
		}
	}

	if (ocmd != cmd) {
		pci_conf_write(pr->pr_pc, pd->pd_tag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}
}

/*
 * pci_resource_alloc_bus --
 *
 *	Attempt to assign resources to all devices on a given bus, recursively.
 */
static void
pci_resource_alloc_bus(struct pci_resources *pr, uint8_t busno)
{
	struct pci_bus *pb = PCICONF_RES_BUS(pr, busno);
	struct pci_device *pd;
	uint8_t devno, funcno;

	for (devno = 0; devno <= pb->pb_lastdevno; devno++) {
		for (funcno = 0; funcno < 8; funcno++) {
			pd = PCICONF_BUS_DEVICE(pb, devno, funcno);
			if (!pd->pd_present) {
				if (funcno == 0) {
					break;
				}
				continue;
			}
			if (!pd->pd_configured) {
				pci_resource_alloc_device(pr, pd);
			}
			if (pd->pd_ppb) {
				uint8_t sec_bus = PCI_BRIDGE_BUS_NUM_SECONDARY(
				    pd->pd_bridge.bridge_bus);
				pci_resource_alloc_bus(pr, sec_bus);
			}
		}
	}
}

/*
 * pci_resource_init --
 *
 *	Public interface to PCI resource manager. Scans for available devices
 *	and assigns resources.
 */
void
pci_resource_init(const struct pci_resource_info *info)
{
	struct pci_resources pr = {};

	pci_resource_probe(&pr, info);
	pci_resource_alloc_bus(&pr, pr.pr_startbus);
}
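
/*
 * Example (illustrative sketch only): a host bridge driver is expected to
 * fill in a struct pci_resource_info with its chipset tag and the ranges
 * it forwards, then call pci_resource_init().  The softc field name and
 * the address values below are hypothetical, and the structure may carry
 * additional members not shown here.
 *
 *	struct pci_resource_info info = {
 *		.pc = sc->sc_pc,		(hypothetical softc member)
 *		.ranges = {
 *			[PCI_RANGE_BUS]  = { .start = 0,          .end = 0xff },
 *			[PCI_RANGE_IO]   = { .start = 0x1000,     .end = 0xffff },
 *			[PCI_RANGE_MEM]  = { .start = 0x40000000, .end = 0x7fffffff },
 *			[PCI_RANGE_PMEM] = { .start = 0x4000000000, .end = 0x40ffffffff },
 *		},
 *	};
 *	pci_resource_init(&info);
 */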

/*
 * pci_resource_typename --
 *
 *	Return a string description of a PCI range type.
 */
const char *
pci_resource_typename(enum pci_range_type prtype)
{
	KASSERT(prtype < NUM_PCI_RANGES);
	return pci_range_typenames[prtype];
}