/*	$NetBSD: pci_ranges.c,v 1.1 2011/08/29 22:46:04 dyoung Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by David Young <dyoung (at) NetBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci_ranges.c,v 1.1 2011/08/29 22:46:04 dyoung Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/bus.h>
#include <sys/kmem.h>

#include <prop/proplib.h>
#include <ppath/ppath.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pccbbreg.h>

typedef enum pci_alloc_regtype {
	  PCI_ALLOC_REGTYPE_NONE = 0
	, PCI_ALLOC_REGTYPE_BAR = 1
	, PCI_ALLOC_REGTYPE_WIN = 2
	, PCI_ALLOC_REGTYPE_CBWIN = 3
	, PCI_ALLOC_REGTYPE_VGA_EN = 4
} pci_alloc_regtype_t;

typedef enum pci_alloc_space {
	  PCI_ALLOC_SPACE_IO = 0
	, PCI_ALLOC_SPACE_MEM = 1
} pci_alloc_space_t;

typedef enum pci_alloc_flags {
	  PCI_ALLOC_F_PREFETCHABLE = 0x1
} pci_alloc_flags_t;

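/*
 * A pci_alloc_t describes one resource assignment inferred from PCI
 * configuration space: the tag of the device it belongs to, the decoded
 * address and size, the kind of register it came from (BAR, bridge
 * window, CardBus window, or VGA-enable bit), up to three configuration
 * registers (offset/value/mask) that encode it, the address space
 * (I/O or memory), and flags such as prefetchability.
 */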
typedef struct pci_alloc {
	TAILQ_ENTRY(pci_alloc)	pal_link;
	pcitag_t		pal_tag;
	uint64_t		pal_addr;
	uint64_t		pal_size;
	pci_alloc_regtype_t	pal_type;
	struct pci_alloc_reg {
		int		r_ofs;
		pcireg_t	r_val;
		pcireg_t	r_mask;
	} pal_reg[3];
	pci_alloc_space_t	pal_space;
	pci_alloc_flags_t	pal_flags;
} pci_alloc_t;

typedef struct pci_alloc_reg pci_alloc_reg_t;

TAILQ_HEAD(pci_alloc_list, pci_alloc);

typedef struct pci_alloc_list pci_alloc_list_t;

static pci_alloc_t *
pci_alloc_dup(const pci_alloc_t *pal)
{
	pci_alloc_t *npal;

	if ((npal = kmem_alloc(sizeof(*npal), KM_SLEEP)) == NULL)
		return NULL;

	*npal = *pal;

	return npal;
}

static bool
pci_alloc_linkdup(pci_alloc_list_t *pals, const pci_alloc_t *pal)
{
	pci_alloc_t *npal;

	if ((npal = pci_alloc_dup(pal)) == NULL)
		return false;

	TAILQ_INSERT_TAIL(pals, npal, pal_link);

	return true;
}

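/*
 * Context carried across the per-device callbacks while scanning the bus:
 * the chipset tag, the list of inferred allocations, and the running
 * minimum/maximum addresses seen so far for memory-mapped and I/O space.
 */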
struct range_infer_ctx {
	pci_chipset_tag_t	ric_pc;
	pci_alloc_list_t	ric_pals;
	bus_addr_t		ric_mmio_bottom;
	bus_addr_t		ric_mmio_top;
	bus_addr_t		ric_io_bottom;
	bus_addr_t		ric_io_top;
};

#if 1
static bool
io_range_extend(struct range_infer_ctx *ric, const pci_alloc_t *pal)
{
	if (ric->ric_io_bottom > pal->pal_addr)
		ric->ric_io_bottom = pal->pal_addr;
	if (ric->ric_io_top < pal->pal_addr + pal->pal_size)
		ric->ric_io_top = pal->pal_addr + pal->pal_size;

	return pci_alloc_linkdup(&ric->ric_pals, pal);
}

static bool
io_range_extend_by_bar(struct range_infer_ctx *ric, int bus, int dev, int fun,
    int ofs, pcireg_t curbar, pcireg_t sizebar)
{
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_BAR
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = curbar;

	pal.pal_addr = PCI_MAPREG_IO_ADDR(curbar);
	pal.pal_size = PCI_MAPREG_IO_SIZE(sizebar);

	aprint_debug("%s: %d.%d.%d base at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return (pal.pal_size == 0) || io_range_extend(ric, &pal);
}

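/*
 * If a bridge has legacy VGA routing enabled (I/O enabled in the command
 * register and the VGA Enable bit set in the bridge control register),
 * treat the fixed legacy VGA I/O ranges 0x3b0-0x3bb and 0x3c0-0x3df as
 * reserved behind that bridge.
 */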
static bool
io_range_extend_by_vga_enable(struct range_infer_ctx *ric,
    int bus, int dev, int fun, pcireg_t csr, pcireg_t bcr)
{
	pci_alloc_reg_t *r;
	pci_alloc_t tpal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_VGA_EN
		, .pal_reg = {{
			  .r_ofs = PCI_COMMAND_STATUS_REG
			, .r_mask = PCI_COMMAND_IO_ENABLE
		  }, {
			  .r_ofs = PCI_BRIDGE_CONTROL_REG
			, .r_mask =
			    PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT
		  }}
	}, pal[2];

	aprint_debug("%s: %d.%d.%d enter\n", __func__, bus, dev, fun);

	if ((csr & PCI_COMMAND_IO_ENABLE) == 0 ||
	    (bcr & (PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT)) == 0) {
		aprint_debug("%s: %d.%d.%d I/O or VGA disabled\n",
		    __func__, bus, dev, fun);
		return true;
	}

	r = &tpal.pal_reg[0];
	tpal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_val = csr;
	r[1].r_val = bcr;

	pal[0] = pal[1] = tpal;

	pal[0].pal_addr = 0x3b0;
	pal[0].pal_size = 0x3bb - 0x3b0 + 1;

	pal[1].pal_addr = 0x3c0;
	pal[1].pal_size = 0x3df - 0x3c0 + 1;

	/* XXX add aliases for pal[0..1] */

	return io_range_extend(ric, &pal[0]) && io_range_extend(ric, &pal[1]);
}

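/*
 * Decode a PCI-PCI bridge's I/O window.  The base and limit fields of the
 * status/I/O register are in 4KB units; if the bridge decodes 32-bit I/O,
 * the upper 16 bits of base and limit come from the I/O-high register.
 */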
static bool
io_range_extend_by_win(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, int ofshigh,
    pcireg_t io, pcireg_t iohigh)
{
	const int fourkb = 4 * 1024;
	pcireg_t baser, limitr;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_WIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = io;

	baser = ((io >> PCI_BRIDGE_STATIO_IOBASE_SHIFT) &
	    PCI_BRIDGE_STATIO_IOBASE_MASK) >> 4;
	limitr = ((io >> PCI_BRIDGE_STATIO_IOLIMIT_SHIFT) &
	    PCI_BRIDGE_STATIO_IOLIMIT_MASK) >> 4;

	if (PCI_BRIDGE_IO_32BITS(io)) {
		pcireg_t baseh, limith;

		r[1].r_mask = ~(pcireg_t)0;
		r[1].r_ofs = ofshigh;
		r[1].r_val = iohigh;

		baseh = (iohigh >> PCI_BRIDGE_IOHIGH_BASE_SHIFT) &
		    PCI_BRIDGE_IOHIGH_BASE_MASK;
		limith = (iohigh >> PCI_BRIDGE_IOHIGH_LIMIT_SHIFT) &
		    PCI_BRIDGE_IOHIGH_LIMIT_MASK;

		baser |= baseh << 4;
		limitr |= limith << 4;
	}

	/* XXX check with the PCI standard */
	if (baser > limitr)
		return true;

	pal.pal_addr = baser * fourkb;
	pal.pal_size = (limitr - baser + 1) * fourkb;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return io_range_extend(ric, &pal);
}

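/*
 * Decode one of a CardBus bridge's two I/O windows.  The base and limit
 * registers hold 32-bit addresses with the low two bits reserved, so the
 * window covers [base, limit + 3].
 */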
static bool
io_range_extend_by_cbwin(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t base0, pcireg_t limit0)
{
	pcireg_t base, limit;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_CBWIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }, {
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = base0;
	r[1].r_ofs = ofs + 4;
	r[1].r_val = limit0;

	base = base0 & __BITS(31, 2);
	limit = limit0 & __BITS(31, 2);

	if (base > limit)
		return true;

	pal.pal_addr = base;
	pal.pal_size = limit - base + 4;	/* XXX */

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return io_range_extend(ric, &pal);
}

static void
io_range_infer(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct range_infer_ctx *ric = ctx;
	pcireg_t bhlcr, limit, io;
	int bar, bus, dev, fun, hdrtype, nbar;
	bool ok = true;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	hdrtype = PCI_HDRTYPE_TYPE(bhlcr);

	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	switch (hdrtype) {
	case PCI_HDRTYPE_PPB:
		nbar = 2;
		/* Extract I/O windows */
		ok = ok && io_range_extend_by_win(ric, bus, dev, fun,
		    PCI_BRIDGE_STATIO_REG,
		    PCI_BRIDGE_IOHIGH_REG,
		    pci_conf_read(pc, tag, PCI_BRIDGE_STATIO_REG),
		    pci_conf_read(pc, tag, PCI_BRIDGE_IOHIGH_REG));
		ok = ok && io_range_extend_by_vga_enable(ric, bus, dev, fun,
		    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG),
		    pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG));
		break;
	case PCI_HDRTYPE_PCB:
		/* Extract I/O windows */
		io = pci_conf_read(pc, tag, PCI_CB_IOBASE0);
		limit = pci_conf_read(pc, tag, PCI_CB_IOLIMIT0);
		ok = ok && io_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_IOBASE0, io, limit);
		io = pci_conf_read(pc, tag, PCI_CB_IOBASE1);
		limit = pci_conf_read(pc, tag, PCI_CB_IOLIMIT1);
		ok = ok && io_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_IOBASE1, io, limit);
		nbar = 1;
		break;
	case PCI_HDRTYPE_DEVICE:
		nbar = 6;
		break;
	default:
		aprint_debug("%s: unknown header type %d at %d.%d.%d\n",
		    __func__, hdrtype, bus, dev, fun);
		return;
	}

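	/*
	 * Size each BAR with the standard probe: save the current value,
	 * write all ones, read back the size mask, then restore the saved
	 * value.  Only I/O BARs are of interest here; memory BARs are
	 * handled by mmio_range_infer().
	 */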
	for (bar = 0; bar < nbar; bar++) {
		pcireg_t basebar, sizebar;

		basebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), 0xffffffff);
		sizebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), basebar);

		if (sizebar == 0)
			continue;
		if (PCI_MAPREG_TYPE(sizebar) != PCI_MAPREG_TYPE_IO)
			continue;

		ok = ok && io_range_extend_by_bar(ric, bus, dev, fun,
		    PCI_BAR(bar), basebar, sizebar);
	}
	if (!ok) {
		aprint_verbose("I/O range inference failed at PCI %d.%d.%d\n",
		    bus, dev, fun);
	}
}
#endif

static bool
mmio_range_extend(struct range_infer_ctx *ric, const pci_alloc_t *pal)
{
	if (ric->ric_mmio_bottom > pal->pal_addr)
		ric->ric_mmio_bottom = pal->pal_addr;
	if (ric->ric_mmio_top < pal->pal_addr + pal->pal_size)
		ric->ric_mmio_top = pal->pal_addr + pal->pal_size;

	return pci_alloc_linkdup(&ric->ric_pals, pal);
}

static bool
mmio_range_extend_by_bar(struct range_infer_ctx *ric, int bus, int dev, int fun,
    int ofs, pcireg_t curbar, pcireg_t sizebar)
{
	int type;
	bool prefetchable;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_BAR
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = curbar;

	pal.pal_addr = PCI_MAPREG_MEM_ADDR(curbar);

	type = PCI_MAPREG_MEM_TYPE(curbar);
	prefetchable = PCI_MAPREG_MEM_PREFETCHABLE(curbar);

	if (prefetchable)
		pal.pal_flags |= PCI_ALLOC_F_PREFETCHABLE;

	switch (type) {
	case PCI_MAPREG_MEM_TYPE_32BIT:
		pal.pal_size = PCI_MAPREG_MEM_SIZE(sizebar);
		break;
	case PCI_MAPREG_MEM_TYPE_64BIT:
		pal.pal_size = PCI_MAPREG_MEM64_SIZE(sizebar);
		break;
	case PCI_MAPREG_MEM_TYPE_32BIT_1M:
	default:
		aprint_debug("%s: ignored memory type %d at %d.%d.%d\n",
		    __func__, type, bus, dev, fun);
		return false;
	}

	aprint_debug("%s: %d.%d.%d base at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return (pal.pal_size == 0) || mmio_range_extend(ric, &pal);
}

static bool
mmio_range_extend_by_vga_enable(struct range_infer_ctx *ric,
    int bus, int dev, int fun, pcireg_t csr, pcireg_t bcr)
{
	pci_alloc_reg_t *r;
	pci_alloc_t tpal = {
		  .pal_flags = PCI_ALLOC_F_PREFETCHABLE	/* XXX a guess */
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_VGA_EN
		, .pal_reg = {{
			  .r_ofs = PCI_COMMAND_STATUS_REG
			, .r_mask = PCI_COMMAND_MEM_ENABLE
		  }, {
			  .r_ofs = PCI_BRIDGE_CONTROL_REG
			, .r_mask =
			    PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT
		  }}
	}, pal;

	aprint_debug("%s: %d.%d.%d enter\n", __func__, bus, dev, fun);

	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0 ||
	    (bcr & (PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT)) == 0) {
		aprint_debug("%s: %d.%d.%d memory or VGA disabled\n",
		    __func__, bus, dev, fun);
		return true;
	}

	r = &tpal.pal_reg[0];
	tpal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_val = csr;
	r[1].r_val = bcr;

	pal = tpal;

	pal.pal_addr = 0xa0000;
	pal.pal_size = 0xbffff - 0xa0000 + 1;

	return mmio_range_extend(ric, &pal);
}

static bool
mmio_range_extend_by_win(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t mem)
{
	const int onemeg = 1024 * 1024;
	pcireg_t baser, limitr;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_WIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = mem;

	baser = (mem >> PCI_BRIDGE_MEMORY_BASE_SHIFT) &
	    PCI_BRIDGE_MEMORY_BASE_MASK;
	limitr = (mem >> PCI_BRIDGE_MEMORY_LIMIT_SHIFT) &
	    PCI_BRIDGE_MEMORY_LIMIT_MASK;

	/* XXX check with the PCI standard */
	if (baser > limitr || limitr == 0)
		return true;

	pal.pal_addr = baser * onemeg;
	pal.pal_size = (limitr - baser + 1) * onemeg;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return mmio_range_extend(ric, &pal);
}

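/*
 * Decode a PCI-PCI bridge's prefetchable memory window.  Base and limit
 * are in 1MB units; if the bridge implements a 64-bit window, the upper
 * 32 bits of base and limit come from the prefetchable base/limit upper
 * registers.
 */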
static bool
mmio_range_extend_by_prememwin(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t mem,
    int hibaseofs, pcireg_t hibase,
    int hilimitofs, pcireg_t hilimit)
{
	const int onemeg = 1024 * 1024;
	uint64_t baser, limitr;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = PCI_ALLOC_F_PREFETCHABLE
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_WIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = mem;

	baser = (mem >> PCI_BRIDGE_PREFETCHMEM_BASE_SHIFT) &
	    PCI_BRIDGE_PREFETCHMEM_BASE_MASK;
	limitr = (mem >> PCI_BRIDGE_PREFETCHMEM_LIMIT_SHIFT) &
	    PCI_BRIDGE_PREFETCHMEM_LIMIT_MASK;

	if (PCI_BRIDGE_PREFETCHMEM_64BITS(mem)) {
		r[1].r_mask = r[2].r_mask = ~(pcireg_t)0;
		r[1].r_ofs = hibaseofs;
		r[1].r_val = hibase;
		r[2].r_ofs = hilimitofs;
		r[2].r_val = hilimit;

		/*
		 * The upper registers hold address bits 63:32; fold them
		 * into the 1MB-unit base and limit.
		 */
		baser |= (uint64_t)hibase << 12;
		limitr |= (uint64_t)hilimit << 12;
	}

	/* XXX check with the PCI standard */
	if (baser > limitr || limitr == 0)
		return true;

	pal.pal_addr = baser * onemeg;
	pal.pal_size = (limitr - baser + 1) * onemeg;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return mmio_range_extend(ric, &pal);
}

static bool
mmio_range_extend_by_cbwin(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t base, pcireg_t limit,
    bool prefetchable)
{
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		  .pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_MEM
		, .pal_type = PCI_ALLOC_REGTYPE_CBWIN
		, .pal_reg = {{
			  .r_mask = ~(pcireg_t)0
		  }, {
			  .r_mask = ~(pcireg_t)0
		  }}
	};

	r = &pal.pal_reg[0];

	if (prefetchable)
		pal.pal_flags |= PCI_ALLOC_F_PREFETCHABLE;

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = base;
	r[1].r_ofs = ofs + 4;
	r[1].r_val = limit;

	if (base > limit)
		return true;

	if (limit == 0)
		return true;

	pal.pal_addr = base;
	pal.pal_size = limit - base + 4096;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return mmio_range_extend(ric, &pal);
}

static void
mmio_range_infer(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct range_infer_ctx *ric = ctx;
	pcireg_t bcr, bhlcr, limit, mem, premem, hiprebase, hiprelimit;
	int bar, bus, dev, fun, hdrtype, nbar;
	bool ok = true;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	hdrtype = PCI_HDRTYPE_TYPE(bhlcr);

	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	switch (hdrtype) {
	case PCI_HDRTYPE_PPB:
		nbar = 2;
		/* Extract memory windows */
		ok = ok && mmio_range_extend_by_win(ric, bus, dev, fun,
		    PCI_BRIDGE_MEMORY_REG,
		    pci_conf_read(pc, tag, PCI_BRIDGE_MEMORY_REG));
		premem = pci_conf_read(pc, tag, PCI_BRIDGE_PREFETCHMEM_REG);
		if (PCI_BRIDGE_PREFETCHMEM_64BITS(premem)) {
			aprint_debug("%s: 64-bit prefetchable memory window "
			    "at %d.%d.%d\n", __func__, bus, dev, fun);
			hiprebase = pci_conf_read(pc, tag,
			    PCI_BRIDGE_PREFETCHBASE32_REG);
			hiprelimit = pci_conf_read(pc, tag,
			    PCI_BRIDGE_PREFETCHLIMIT32_REG);
		} else
			hiprebase = hiprelimit = 0;
		ok = ok &&
		    mmio_range_extend_by_prememwin(ric, bus, dev, fun,
			PCI_BRIDGE_PREFETCHMEM_REG, premem,
			PCI_BRIDGE_PREFETCHBASE32_REG, hiprebase,
			PCI_BRIDGE_PREFETCHLIMIT32_REG, hiprelimit) &&
		    mmio_range_extend_by_vga_enable(ric, bus, dev, fun,
			pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG),
			pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG));
		break;
	case PCI_HDRTYPE_PCB:
		/* Extract memory windows */
		bcr = pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG);
		mem = pci_conf_read(pc, tag, PCI_CB_MEMBASE0);
		limit = pci_conf_read(pc, tag, PCI_CB_MEMLIMIT0);
		ok = ok && mmio_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_MEMBASE0, mem, limit,
		    (bcr & CB_BCR_PREFETCH_MEMWIN0) != 0);
		mem = pci_conf_read(pc, tag, PCI_CB_MEMBASE1);
		limit = pci_conf_read(pc, tag, PCI_CB_MEMLIMIT1);
		ok = ok && mmio_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_MEMBASE1, mem, limit,
		    (bcr & CB_BCR_PREFETCH_MEMWIN1) != 0);
		nbar = 1;
		break;
	case PCI_HDRTYPE_DEVICE:
		nbar = 6;
		break;
	default:
		aprint_debug("%s: unknown header type %d at %d.%d.%d\n",
		    __func__, hdrtype, bus, dev, fun);
		return;
	}

	for (bar = 0; bar < nbar; bar++) {
		pcireg_t basebar, sizebar;

		basebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), 0xffffffff);
		sizebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), basebar);

		if (sizebar == 0)
			continue;
		if (PCI_MAPREG_TYPE(sizebar) != PCI_MAPREG_TYPE_MEM)
			continue;

		ok = ok && mmio_range_extend_by_bar(ric, bus, dev, fun,
		    PCI_BAR(bar), basebar, sizebar);
	}
	if (!ok) {
		aprint_verbose("MMIO range inference failed at PCI %d.%d.%d\n",
		    bus, dev, fun);
	}
}

static const char *
pci_alloc_regtype_string(const pci_alloc_regtype_t t)
{
	switch (t) {
	case PCI_ALLOC_REGTYPE_BAR:
		return "bar";
	case PCI_ALLOC_REGTYPE_WIN:
	case PCI_ALLOC_REGTYPE_CBWIN:
		return "window";
	case PCI_ALLOC_REGTYPE_VGA_EN:
		return "vga-enable";
	default:
		return "<unknown>";
	}
}

static void
pci_alloc_print(pci_chipset_tag_t pc, const pci_alloc_t *pal)
{
	int bus, dev, fun;
	const pci_alloc_reg_t *r;

	pci_decompose_tag(pc, pal->pal_tag, &bus, &dev, &fun);
	r = &pal->pal_reg[0];

	aprint_normal("%s range [0x%08" PRIx64 ", 0x%08" PRIx64 ")"
	    " at %d.%d.%d %s%s 0x%02x\n",
	    (pal->pal_space == PCI_ALLOC_SPACE_IO) ? "IO" : "MMIO",
	    pal->pal_addr, pal->pal_addr + pal->pal_size,
	    bus, dev, fun,
	    (pal->pal_flags & PCI_ALLOC_F_PREFETCHABLE) ? "prefetchable " : "",
	    pci_alloc_regtype_string(pal->pal_type),
	    r->r_ofs);
}

prop_dictionary_t pci_rsrc_dict = NULL;

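/*
 * Append to `rsvns' one reservation dictionary for every allocation in
 * `pals' that belongs to `space'.  Each dictionary carries a "regs" array
 * of {offset, val, mask} register dictionaries plus the reservation's
 * "type", "address", "size", and "bus"/"device"/"function" keys.
 */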
static bool
pci_range_record(pci_chipset_tag_t pc, prop_array_t rsvns,
    pci_alloc_list_t *pals, pci_alloc_space_t space)
{
	int bus, dev, fun, i;
	prop_array_t regs;
	prop_dictionary_t reg;
	const pci_alloc_t *pal;
	const pci_alloc_reg_t *r;
	prop_dictionary_t rsvn;

	TAILQ_FOREACH(pal, pals, pal_link) {
		bool ok = true;

		r = &pal->pal_reg[0];

		if (pal->pal_space != space)
			continue;

		if ((rsvn = prop_dictionary_create()) == NULL)
			return false;

		if ((regs = prop_array_create()) == NULL) {
			prop_object_release(rsvn);
			return false;
		}

		if (!prop_dictionary_set(rsvn, "regs", regs)) {
			prop_object_release(rsvn);
			prop_object_release(regs);
			return false;
		}

		for (i = 0; i < __arraycount(pal->pal_reg); i++) {
			r = &pal->pal_reg[i];

			if (r->r_mask == 0)
				break;

			ok = (reg = prop_dictionary_create()) != NULL;
			if (!ok)
				break;

			ok = prop_dictionary_set_uint16(reg, "offset",
			    r->r_ofs) &&
			    prop_dictionary_set_uint32(reg, "val", r->r_val) &&
			    prop_dictionary_set_uint32(reg, "mask",
				r->r_mask) && prop_array_add(regs, reg);
			if (!ok) {
				prop_object_release(reg);
				break;
			}
		}

		pci_decompose_tag(pc, pal->pal_tag, &bus, &dev, &fun);

		ok = ok &&
		    prop_dictionary_set_cstring_nocopy(rsvn, "type",
			pci_alloc_regtype_string(pal->pal_type)) &&
		    prop_dictionary_set_uint64(rsvn, "address",
			pal->pal_addr) &&
		    prop_dictionary_set_uint64(rsvn, "size", pal->pal_size) &&
		    prop_dictionary_set_uint8(rsvn, "bus", bus) &&
		    prop_dictionary_set_uint8(rsvn, "device", dev) &&
		    prop_dictionary_set_uint8(rsvn, "function", fun) &&
		    prop_array_add(rsvns, rsvn);
		prop_object_release(rsvn);
		if (!ok)
			return false;
	}
	return true;
}

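/*
 * Return a copy of the resource dictionary `rsrcs0' from which every
 * reservation under memory/bios-reservations and io/bios-reservations
 * that does not satisfy `predicate' has been deleted.  Returns NULL on
 * failure; the caller keeps ownership of `rsrcs0'.
 */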
prop_dictionary_t
pci_rsrc_filter(prop_dictionary_t rsrcs0,
    bool (*predicate)(void *, prop_dictionary_t), void *arg)
{
	int i, space;
	prop_dictionary_t rsrcs;
	prop_array_t rsvns;
	ppath_t *op, *p;

	if ((rsrcs = prop_dictionary_copy(rsrcs0)) == NULL)
		return NULL;

	for (space = 0; space < 2; space++) {
		op = p = ppath_create();
		p = ppath_push_key(p, (space == 0) ? "memory" : "io");
		p = ppath_push_key(p, "bios-reservations");
		if (p == NULL) {
			ppath_release(op);
			prop_object_release(rsrcs);
			return NULL;
		}
		if ((rsvns = ppath_lookup(rsrcs0, p)) == NULL) {
			printf("%s: reservations not found\n", __func__);
			ppath_release(p);
			prop_object_release(rsrcs);
			return NULL;
		}
		for (i = prop_array_count(rsvns); --i >= 0; ) {
			prop_dictionary_t rsvn;

			if ((p = ppath_push_idx(p, i)) == NULL) {
				printf("%s: ppath_push_idx\n", __func__);
				ppath_release(op);
				prop_object_release(rsrcs);
				return NULL;
			}

			rsvn = ppath_lookup(rsrcs0, p);

			KASSERT(rsvn != NULL);

			if (!(*predicate)(arg, rsvn)) {
				ppath_copydel_object((prop_object_t)rsrcs0,
				    (prop_object_t *)&rsrcs, p);
			}

			if ((p = ppath_pop(p, NULL)) == NULL) {
				printf("%s: ppath_pop\n", __func__);
				ppath_release(op);
				prop_object_release(rsrcs);
				return NULL;
			}
		}
		ppath_release(op);
	}
	return rsrcs;
}

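/*
 * Scan configuration space on buses minbus..maxbus and infer the I/O and
 * memory-mapped ranges that the firmware assigned to PCI devices.  The
 * overall extents are returned through the optional out-parameters, every
 * inferred allocation is printed, and the details are published in the
 * global pci_rsrc_dict for later filtering with pci_rsrc_filter().
 *
 * A minimal usage sketch (illustrative only; the caller and the bus range
 * are machine-dependent):
 *
 *	bus_addr_t iobase, membase;
 *	bus_size_t iosize, memsize;
 *
 *	pci_ranges_infer(pc, 0, 255, &iobase, &iosize, &membase, &memsize);
 */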
void
pci_ranges_infer(pci_chipset_tag_t pc, int minbus, int maxbus,
    bus_addr_t *iobasep, bus_size_t *iosizep,
    bus_addr_t *membasep, bus_size_t *memsizep)
{
	prop_dictionary_t iodict = NULL, memdict = NULL;
	prop_array_t iorsvns, memrsvns;
	struct range_infer_ctx ric = {
		  .ric_io_bottom = ~((bus_addr_t)0)
		, .ric_io_top = 0
		, .ric_mmio_bottom = ~((bus_addr_t)0)
		, .ric_mmio_top = 0
		, .ric_pals = TAILQ_HEAD_INITIALIZER(ric.ric_pals)
	};
	const pci_alloc_t *pal;

	ric.ric_pc = pc;
	pci_device_foreach_min(pc, minbus, maxbus, mmio_range_infer, &ric);
	pci_device_foreach_min(pc, minbus, maxbus, io_range_infer, &ric);
	if (membasep != NULL)
		*membasep = ric.ric_mmio_bottom;
	if (memsizep != NULL)
		*memsizep = ric.ric_mmio_top - ric.ric_mmio_bottom;
	if (iobasep != NULL)
		*iobasep = ric.ric_io_bottom;
	if (iosizep != NULL)
		*iosizep = ric.ric_io_top - ric.ric_io_bottom;
	aprint_verbose("%s: inferred %" PRIuMAX
	    " bytes of memory-mapped PCI space at 0x%" PRIxMAX "\n", __func__,
	    (uintmax_t)(ric.ric_mmio_top - ric.ric_mmio_bottom),
	    (uintmax_t)ric.ric_mmio_bottom);
	aprint_verbose("%s: inferred %" PRIuMAX
	    " bytes of PCI I/O space at 0x%" PRIxMAX "\n", __func__,
	    (uintmax_t)(ric.ric_io_top - ric.ric_io_bottom),
	    (uintmax_t)ric.ric_io_bottom);
	TAILQ_FOREACH(pal, &ric.ric_pals, pal_link)
		pci_alloc_print(pc, pal);

	if ((memdict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI MMIO "
		    "resources dictionary\n", __func__);
	} else if ((memrsvns = prop_array_create()) == NULL) {
		aprint_error("%s: could not create PCI BIOS memory "
		    "reservations array\n", __func__);
	} else if (!prop_dictionary_set(memdict, "bios-reservations",
	    memrsvns)) {
		aprint_error("%s: could not record PCI BIOS memory "
		    "reservations array\n", __func__);
	} else if (!pci_range_record(pc, memrsvns, &ric.ric_pals,
	    PCI_ALLOC_SPACE_MEM)) {
		aprint_error("%s: could not record PCI BIOS memory "
		    "reservations\n", __func__);
	} else if (!prop_dictionary_set_uint64(memdict,
	    "start", ric.ric_mmio_bottom) ||
	    !prop_dictionary_set_uint64(memdict, "size",
		ric.ric_mmio_top - ric.ric_mmio_bottom)) {
		aprint_error("%s: could not record PCI memory min & max\n",
		    __func__);
	} else if ((iodict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI I/O "
		    "resources dictionary\n", __func__);
	} else if ((iorsvns = prop_array_create()) == NULL) {
		aprint_error("%s: could not create PCI BIOS I/O "
		    "reservations array\n", __func__);
	} else if (!prop_dictionary_set(iodict, "bios-reservations",
	    iorsvns)) {
		aprint_error("%s: could not record PCI BIOS I/O "
		    "reservations array\n", __func__);
	} else if (!pci_range_record(pc, iorsvns, &ric.ric_pals,
	    PCI_ALLOC_SPACE_IO)) {
		aprint_error("%s: could not record PCI BIOS I/O "
		    "reservations\n", __func__);
	} else if (!prop_dictionary_set_uint64(iodict,
	    "start", ric.ric_io_bottom) ||
	    !prop_dictionary_set_uint64(iodict, "size",
		ric.ric_io_top - ric.ric_io_bottom)) {
		aprint_error("%s: could not record PCI I/O min & max\n",
		    __func__);
	} else if ((pci_rsrc_dict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI resources dictionary\n",
		    __func__);
	} else if (!prop_dictionary_set(pci_rsrc_dict, "memory", memdict) ||
	    !prop_dictionary_set(pci_rsrc_dict, "io", iodict)) {
		aprint_error("%s: could not record PCI memory- or I/O-"
		    "resources dictionary\n", __func__);
		prop_object_release(pci_rsrc_dict);
		pci_rsrc_dict = NULL;
	}

	if (iodict != NULL)
		prop_object_release(iodict);
	if (memdict != NULL)
		prop_object_release(memdict);
	/* XXX release iorsvns, memrsvns */
}