linux_pci.c revision 1.29 1 /* $NetBSD: linux_pci.c,v 1.29 2024/06/23 00:53:48 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifdef _KERNEL_OPT
33 #include "acpica.h"
34 #include "opt_pci.h"
35 #endif
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.29 2024/06/23 00:53:48 riastradh Exp $");
39
40 #if NACPICA > 0
41 #include <dev/acpi/acpivar.h>
42 #include <dev/acpi/acpi_pci.h>
43 #endif
44
45 #include <linux/pci.h>
46
47 #include <drm/drm_agp_netbsd.h>
48
49 device_t
50 pci_dev_dev(struct pci_dev *pdev)
51 {
52
53 return pdev->pd_dev;
54 }
55
56 void
57 pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
58 {
59 pdev->pd_drvdata = drvdata;
60 }
61
62 void *
63 pci_get_drvdata(struct pci_dev *pdev)
64 {
65 return pdev->pd_drvdata;
66 }
67
68 const char *
69 pci_name(struct pci_dev *pdev)
70 {
71
72 /* XXX not sure this has the right format */
73 return device_xname(pci_dev_dev(pdev));
74 }
75
/*
 * Setup enough of a parent that we can access config space.
 * This is gross and grovels pci(4) and ppb(4) internals.
 *
 * Returns a kmem_zalloc'd fake pci_dev for the upstream ppb(4)
 * bridge, or NULL when the device is not behind a ppb.  The caller
 * owns the result and frees it with kmem_free (linux_pci_dev_destroy).
 */
static struct pci_dev *
alloc_fake_parent_device(device_t parent, const struct pci_attach_args *pa)
{

	/* Only makes sense when we attach under pci(4). */
	if (parent == NULL || !device_is_a(parent, "pci"))
		return NULL;

	/* The pci(4) bus itself must hang off a ppb(4) bridge. */
	device_t pparent = device_parent(parent);
	if (pparent == NULL || !device_is_a(pparent, "ppb"))
		return NULL;

	/* Grovel the softc internals of both drivers. */
	struct pci_softc *pcisc = device_private(parent);
	struct ppb_softc *ppbsc = device_private(pparent);

	struct pci_dev *parentdev = kmem_zalloc(sizeof(*parentdev), KM_SLEEP);

	/* Copy this device's pci_attach_args{} as a base-line. */
	struct pci_attach_args *npa = &parentdev->pd_pa;
	*npa = *pa;

	/* Now update with stuff found in parent. */
	npa->pa_iot = pcisc->sc_iot;
	npa->pa_memt = pcisc->sc_memt;
	npa->pa_dmat = pcisc->sc_dmat;
	npa->pa_dmat64 = pcisc->sc_dmat64;
	npa->pa_pc = pcisc->sc_pc;
	npa->pa_flags = 0; /* XXX? */

	/* Copy the parent tag, and read some info about it. */
	npa->pa_tag = ppbsc->sc_tag;
	pcireg_t id = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_ID_REG);
	pcireg_t subid = pci_conf_read(npa->pa_pc, npa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	pcireg_t class = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_CLASS_REG);

	/*
	 * Fill in as much of pci_attach_args and pci_dev as reasonably possible.
	 * Most of this is not used currently.
	 */
	int bus, device, function;
	pci_decompose_tag(npa->pa_pc, npa->pa_tag, &bus, &device, &function);
	npa->pa_device = device;
	npa->pa_function = function;
	npa->pa_bus = bus;
	npa->pa_id = id;
	npa->pa_class = class;
	npa->pa_intrswiz = pcisc->sc_intrswiz;
	npa->pa_intrtag = pcisc->sc_intrtag;
	npa->pa_intrpin = PCI_INTERRUPT_PIN_NONE;

	parentdev->pd_dev = parent;

	/* The fake parent gets no pci_bus of its own. */
	parentdev->bus = NULL;
	parentdev->devfn = device << 3 | function;
	parentdev->vendor = PCI_VENDOR(id);
	parentdev->device = PCI_PRODUCT(id);
	parentdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subid);
	parentdev->subsystem_device = PCI_SUBSYS_ID(subid);
	parentdev->revision = PCI_REVISION(class);
	parentdev->class = __SHIFTOUT(class, 0xffffff00UL); /* ? */

	return parentdev;
}
143
/*
 * Initialize a Linux-style pci_dev from NetBSD pci_attach_args.
 *
 * pdev is caller-allocated and fully overwritten here; dev/parent are
 * the autoconf device and its parent (either may be NULL for devices
 * obtained via pci_get_*); kludges is a mask of NBPCI_KLUDGE_* flags.
 */
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	/* Associate the matching ACPI device node, if there is one. */
	const int seg = pci_get_segment(pa->pa_pc);
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->pd_drvdata = NULL;
	/*
	 * NOTE(review): KM_NOSLEEP allocation may return NULL, and the
	 * stores below would then fault -- confirm whether KM_SLEEP is
	 * safe in all callers, or add a NULL check.
	 */
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	/*
	 * NetBSD doesn't have an easy "am I PCIe" or "give me PCIe speed
	 * from capability" function, but we already emulate the Linux
	 * versions that do.
	 */
	if (pci_is_pcie(pdev)) {
		pdev->bus->max_bus_speed = pcie_get_speed_cap(pdev);
	} else {
		/* XXX: Do AGP/PCI-X, etc.? */
		pdev->bus->max_bus_speed = PCI_SPEED_UNKNOWN;
	}
	pdev->bus->self = alloc_fake_parent_device(parent, pa);
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	/* Record address/size/flags of each BAR; zero on probe failure. */
	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}
211
212 int
213 pci_find_capability(struct pci_dev *pdev, int cap)
214 {
215
216 return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
217 NULL, NULL);
218 }
219
220 int
221 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
222 {
223
224 KASSERT(!ISSET(reg, 3));
225 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
226 return 0;
227 }
228
229 int
230 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
231 {
232
233 KASSERT(!ISSET(reg, 1));
234 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
235 (reg &~ 2)) >> (8 * (reg & 2));
236 return 0;
237 }
238
239 int
240 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
241 {
242
243 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
244 (reg &~ 3)) >> (8 * (reg & 3));
245 return 0;
246 }
247
248 int
249 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
250 {
251
252 KASSERT(!ISSET(reg, 3));
253 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
254 return 0;
255 }
256
257 int
258 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
259 uint32_t *valuep)
260 {
261 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
262 PCI_FUNC(devfn));
263
264 KASSERT(!ISSET(reg, 1));
265 *valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
266 return 0;
267 }
268
269 int
270 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
271 uint16_t *valuep)
272 {
273 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
274 PCI_FUNC(devfn));
275
276 KASSERT(!ISSET(reg, 1));
277 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
278 return 0;
279 }
280
281 int
282 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
283 uint8_t *valuep)
284 {
285 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
286 PCI_FUNC(devfn));
287
288 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
289 return 0;
290 }
291
292 int
293 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
294 uint32_t value)
295 {
296 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
297 PCI_FUNC(devfn));
298
299 KASSERT(!ISSET(reg, 3));
300 pci_conf_write(bus->pb_pc, tag, reg, value);
301 return 0;
302 }
303
304 static void
305 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
306 uint32_t value)
307 {
308 const uint32_t mask = ~((~0UL) << (8 * bytes));
309 const int reg32 = (reg &~ 3);
310 const unsigned int shift = (8 * (reg & 3));
311 uint32_t value32;
312
313 KASSERT(bytes <= 4);
314 KASSERT(!ISSET(value, ~mask));
315 value32 = pci_conf_read(pc, tag, reg32);
316 value32 &=~ (mask << shift);
317 value32 |= (value << shift);
318 pci_conf_write(pc, tag, reg32, value32);
319 }
320
321 int
322 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
323 {
324
325 KASSERT(!ISSET(reg, 1));
326 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
327 return 0;
328 }
329
330 int
331 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
332 {
333
334 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
335 return 0;
336 }
337
338 int
339 pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
340 uint16_t value)
341 {
342 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
343 PCI_FUNC(devfn));
344
345 KASSERT(!ISSET(reg, 1));
346 pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
347 return 0;
348 }
349
350 int
351 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
352 uint8_t value)
353 {
354 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
355 PCI_FUNC(devfn));
356
357 pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
358 return 0;
359 }
360
361 int
362 pci_enable_msi(struct pci_dev *pdev)
363 {
364 const struct pci_attach_args *const pa = &pdev->pd_pa;
365
366 if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
367 return -EINVAL;
368
369 pdev->msi_enabled = 1;
370 return 0;
371 }
372
373 void
374 pci_disable_msi(struct pci_dev *pdev __unused)
375 {
376 const struct pci_attach_args *const pa = &pdev->pd_pa;
377
378 if (pdev->pd_intr_handles != NULL) {
379 pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
380 pdev->pd_intr_handles = NULL;
381 }
382 pdev->msi_enabled = 0;
383 }
384
385 void
386 pci_set_master(struct pci_dev *pdev)
387 {
388 pcireg_t csr;
389
390 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
391 PCI_COMMAND_STATUS_REG);
392 csr |= PCI_COMMAND_MASTER_ENABLE;
393 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
394 PCI_COMMAND_STATUS_REG, csr);
395 }
396
397 void
398 pci_clear_master(struct pci_dev *pdev)
399 {
400 pcireg_t csr;
401
402 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
403 PCI_COMMAND_STATUS_REG);
404 csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
405 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
406 PCI_COMMAND_STATUS_REG, csr);
407 }
408
409 int
410 pcie_capability_read_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
411 {
412 pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
413 pcitag_t tag = pdev->pd_pa.pa_tag;
414 int off;
415
416 *valuep = 0;
417
418 /* Must have capabilities. */
419 if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
420 return 1;
421
422 *valuep = pci_conf_read(pc, tag, off + reg);
423
424 return 0;
425 }
426
427 int
428 pcie_capability_read_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
429 {
430 pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
431 pcitag_t tag = pdev->pd_pa.pa_tag;
432 int off;
433
434 *valuep = 0;
435
436 /* Must have capabilities. */
437 if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
438 return 1;
439
440 *valuep = pci_conf_read(pc, tag, off + (reg &~ 2)) >> (8 * (reg & 2));
441
442 return 0;
443 }
444
445 int
446 pcie_capability_write_dword(struct pci_dev *pdev, int reg, uint32_t value)
447 {
448 pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
449 pcitag_t tag = pdev->pd_pa.pa_tag;
450 int off;
451
452 /* Must have capabilities. */
453 if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
454 return 1;
455
456 pci_conf_write(pc, tag, off + reg, value);
457
458 return 0;
459 }
460
461 int
462 pcie_capability_write_word(struct pci_dev *pdev, int reg, uint16_t value)
463 {
464 pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
465 pcitag_t tag = pdev->pd_pa.pa_tag;
466 int off;
467
468 /* Must have capabilities. */
469 if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
470 return 1;
471
472 pci_rmw_config(pc, tag, off + reg, 2, value);
473
474 return 0;
475 }
476
/*
 * From PCIe 5.0 7.5.3.4 "Device Control Register": Max_Read_Request_Size
 * encodings, indexed by the 3-bit field value (0b110/0b111 are reserved).
 */
static const unsigned readrqmax[] = {
	128,	/* 0b000 */
	256,	/* 0b001 */
	512,	/* 0b010 */
	1024,	/* 0b011 */
	2048,	/* 0b100 */
	4096,	/* 0b101 */
};
486
487 int
488 pcie_get_readrq(struct pci_dev *pdev)
489 {
490 pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
491 pcitag_t tag = pdev->pd_pa.pa_tag;
492 unsigned val;
493 int off;
494
495 if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
496 return -EINVAL; /* XXX NetBSD->Linux */
497
498 val = __SHIFTOUT(pci_conf_read(pc, tag, off + PCIE_DCSR),
499 PCIE_DCSR_MAX_READ_REQ);
500
501 if (val >= __arraycount(readrqmax))
502 val = 0;
503 return readrqmax[val];
504 }
505
506 int
507 pcie_set_readrq(struct pci_dev *pdev, int val)
508 {
509 pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
510 pcitag_t tag = pdev->pd_pa.pa_tag;
511 pcireg_t reg, newval = 0;
512 unsigned i;
513 int off;
514
515 for (i = 0; i < __arraycount(readrqmax); i++) {
516 if (readrqmax[i] == val) {
517 newval = i;
518 break;
519 }
520 }
521
522 if (i == __arraycount(readrqmax))
523 return -EINVAL;
524
525 if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
526 return -EINVAL; /* XXX NetBSD->Linux */
527
528 reg = pci_conf_read(pc, tag, off + PCIE_DCSR);
529 reg &= ~PCIE_DCSR_MAX_READ_REQ | (newval << 12);
530 pci_conf_write(pc, tag, off + PCIE_DCSR, reg);
531
532 return 0;
533 }
534
535 bus_addr_t
536 pcibios_align_resource(void *p, const struct resource *resource,
537 bus_addr_t addr, bus_size_t size)
538 {
539 panic("pcibios_align_resource has accessed unaligned neurons!");
540 }
541
542 int
543 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
544 bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
545 bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
546 bus_size_t) __unused,
547 struct pci_dev *pdev)
548 {
549 const struct pci_attach_args *const pa = &pdev->pd_pa;
550 bus_space_tag_t bst;
551 int error;
552
553 switch (resource->flags) {
554 case IORESOURCE_MEM:
555 bst = pa->pa_memt;
556 break;
557
558 case IORESOURCE_IO:
559 bst = pa->pa_iot;
560 break;
561
562 default:
563 panic("I don't know what kind of resource you want!");
564 }
565
566 resource->r_bst = bst;
567 error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
568 size, align, 0, 0, &resource->start, &resource->r_bsh);
569 if (error)
570 return error;
571
572 resource->end = start + (size - 1);
573 return 0;
574 }
575
/* Search key for pci_get_domain_bus_and_slot: slot is a Linux devfn. */
struct pci_domain_bus_and_slot {
	int domain, bus, slot;
};
579
580 static int
581 pci_match_domain_bus_and_slot(void *cookie, const struct pci_attach_args *pa)
582 {
583 const struct pci_domain_bus_and_slot *C = cookie;
584
585 if (pci_get_segment(pa->pa_pc) != C->domain)
586 return 0;
587 if (pa->pa_bus != C->bus)
588 return 0;
589 if (PCI_DEVFN(pa->pa_device, pa->pa_function) != C->slot)
590 return 0;
591
592 return 1;
593 }
594
595 struct pci_dev *
596 pci_get_domain_bus_and_slot(int domain, int bus, int slot)
597 {
598 struct pci_attach_args pa;
599 struct pci_domain_bus_and_slot context = {domain, bus, slot},
600 *C = &context;
601
602 if (!pci_find_device1(&pa, &pci_match_domain_bus_and_slot, C))
603 return NULL;
604
605 struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
606 linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
607
608 return pdev;
609 }
610
611 void
612 pci_dev_put(struct pci_dev *pdev)
613 {
614
615 if (pdev == NULL)
616 return;
617
618 KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
619 kmem_free(pdev->bus, sizeof(*pdev->bus));
620 kmem_free(pdev, sizeof(*pdev));
621 }
622
/*
 * Iterator state for pci_get_class: `from' is the device to resume
 * after (cleared once it has been seen during the scan).
 */
struct pci_get_class_state {
	uint32_t class_subclass_shifted;
	const struct pci_dev *from;
};
627
628 static int
629 pci_get_class_match(void *cookie, const struct pci_attach_args *pa)
630 {
631 struct pci_get_class_state *C = cookie;
632
633 if (C->from) {
634 if ((pci_get_segment(C->from->pd_pa.pa_pc) ==
635 pci_get_segment(pa->pa_pc)) &&
636 C->from->pd_pa.pa_bus == pa->pa_bus &&
637 C->from->pd_pa.pa_device == pa->pa_device &&
638 C->from->pd_pa.pa_function == pa->pa_function)
639 C->from = NULL;
640 return 0;
641 }
642 if (C->class_subclass_shifted !=
643 (PCI_CLASS(pa->pa_class) << 8 | PCI_SUBCLASS(pa->pa_class)))
644 return 0;
645
646 return 1;
647 }
648
649 struct pci_dev *
650 pci_get_class(uint32_t class_subclass_shifted, struct pci_dev *from)
651 {
652 struct pci_get_class_state context = {class_subclass_shifted, from},
653 *C = &context;
654 struct pci_attach_args pa;
655 struct pci_dev *pdev = NULL;
656
657 if (!pci_find_device1(&pa, &pci_get_class_match, C))
658 goto out;
659 pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
660 linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
661
662 out: if (from)
663 pci_dev_put(from);
664 return pdev;
665 }
666
/*
 * Stub: always reports no matching device.
 *
 * XXX implement me -- pci_find_device doesn't pass a cookie, so the
 * id table cannot be threaded through to a match callback.
 */
int
pci_dev_present(const struct pci_device_id *ids)
{

	/* XXX implement me -- pci_find_device doesn't pass a cookie */
	return 0;
}
674
675 void
676 pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
677 {
678
679 /* XXX Disable the ROM address decoder. */
680 KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
681 KASSERT(vaddr == pdev->pd_rom_vaddr);
682 bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
683 pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
684 pdev->pd_rom_vaddr = NULL;
685 }
686
687 /* XXX Whattakludge! Should move this in sys/arch/. */
688 static int
689 pci_map_rom_md(struct pci_dev *pdev)
690 {
691 #if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
692 const bus_addr_t rom_base = 0xc0000;
693 const bus_size_t rom_size = 0x20000;
694 bus_space_handle_t rom_bsh;
695 int error;
696
697 if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
698 return ENXIO;
699 if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
700 return ENXIO;
701 /* XXX Check whether this is the primary VGA card? */
702 error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
703 (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
704 if (error)
705 return ENXIO;
706
707 pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
708 pdev->pd_rom_bsh = rom_bsh;
709 pdev->pd_rom_size = rom_size;
710 pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;
711
712 return 0;
713 #else
714 return ENXIO;
715 #endif
716 }
717
/*
 * Map the device's expansion ROM and locate an x86 code image in it.
 * On success, stores the image size in *sizep and returns its mapped
 * kernel virtual address; on failure returns NULL.  Tries the ROM BAR
 * first, then falls back to the MD legacy VGA shadow region.
 */
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	/* Attempt 1: map the ROM BAR itself. */
	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		/* No usable image; undo the BAR mapping and fall back. */
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	/* Attempt 2: machine-dependent legacy VGA ROM shadow. */
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}
762
763 void __pci_rom_iomem *
764 pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
765 {
766
767 *sizep = 0;
768 return NULL;
769 }
770
771 int
772 pci_enable_rom(struct pci_dev *pdev)
773 {
774 const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
775 const pcitag_t tag = pdev->pd_pa.pa_tag;
776 pcireg_t addr;
777 int s;
778
779 /* XXX Don't do anything if the ROM isn't there. */
780
781 s = splhigh();
782 addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
783 addr |= PCI_MAPREG_ROM_ENABLE;
784 pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
785 splx(s);
786
787 return 0;
788 }
789
790 void
791 pci_disable_rom(struct pci_dev *pdev)
792 {
793 const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
794 const pcitag_t tag = pdev->pd_pa.pa_tag;
795 pcireg_t addr;
796 int s;
797
798 s = splhigh();
799 addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
800 addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
801 pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
802 splx(s);
803 }
804
805 bus_addr_t
806 pci_resource_start(struct pci_dev *pdev, unsigned i)
807 {
808
809 if (i >= PCI_NUM_RESOURCES)
810 panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
811 return pdev->pd_resources[i].addr;
812 }
813
814 bus_size_t
815 pci_resource_len(struct pci_dev *pdev, unsigned i)
816 {
817
818 if (i >= PCI_NUM_RESOURCES)
819 panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
820 return pdev->pd_resources[i].size;
821 }
822
823 bus_addr_t
824 pci_resource_end(struct pci_dev *pdev, unsigned i)
825 {
826
827 return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
828 }
829
830 int
831 pci_resource_flags(struct pci_dev *pdev, unsigned i)
832 {
833
834 if (i >= PCI_NUM_RESOURCES)
835 panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
836 return pdev->pd_resources[i].flags;
837 }
838
839 void __pci_iomem *
840 pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
841 {
842 int error;
843
844 KASSERT(i < PCI_NUM_RESOURCES);
845 KASSERT(pdev->pd_resources[i].kva == NULL);
846
847 if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
848 return NULL;
849 if (pdev->pd_resources[i].size < size)
850 return NULL;
851 error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
852 size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
853 &pdev->pd_resources[i].bsh);
854 if (error)
855 return NULL;
856 pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
857 pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
858 pdev->pd_resources[i].bsh);
859 pdev->pd_resources[i].mapped = true;
860
861 return pdev->pd_resources[i].kva;
862 }
863
864 void
865 pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
866 {
867 unsigned i;
868
869 CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
870 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
871 if (pdev->pd_resources[i].kva == kva)
872 break;
873 }
874 KASSERT(i < PCI_NUM_RESOURCES);
875
876 pdev->pd_resources[i].kva = NULL;
877 bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
878 pdev->pd_resources[i].size);
879 }
880
881 void
882 pci_save_state(struct pci_dev *pdev)
883 {
884
885 KASSERT(pdev->pd_saved_state == NULL);
886 pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
887 KM_SLEEP);
888 pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
889 pdev->pd_saved_state);
890 }
891
892 void
893 pci_restore_state(struct pci_dev *pdev)
894 {
895
896 KASSERT(pdev->pd_saved_state != NULL);
897 pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
898 pdev->pd_saved_state);
899 kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
900 pdev->pd_saved_state = NULL;
901 }
902
903 bool
904 pci_is_pcie(struct pci_dev *pdev)
905 {
906
907 return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
908 }
909
910 bool
911 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
912 {
913
914 /* XXX Cop-out. */
915 if (mask > DMA_BIT_MASK(32))
916 return pci_dma64_available(&pdev->pd_pa);
917 else
918 return true;
919 }
920
921 bool
922 pci_is_thunderbolt_attached(struct pci_dev *pdev)
923 {
924
925 /* XXX Cop-out. */
926 return false;
927 }
928
929 bool
930 pci_is_root_bus(struct pci_bus *bus)
931 {
932
933 return bus->number == 0;
934 }
935
936 int
937 pci_domain_nr(struct pci_bus *bus)
938 {
939
940 return pci_get_segment(bus->pb_pc);
941 }
942
943 /*
944 * We explicitly rename pci_enable/disable_device so that you have to
945 * review each use of them, since NetBSD's PCI API does _not_ respect
946 * our local enablecnt here, but there are different parts of NetBSD
947 * that automatically enable/disable like PMF, so you have to decide
948 * for each one whether to call it or not.
949 */
950
/*
 * Enable I/O and memory decoding, counted: only the first enable
 * touches the hardware.  If firmware already enabled decoding, the
 * count is bumped an extra time so a balanced disable from our caller
 * does not turn the device off underneath firmware's enable.
 */
int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them. */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}
973
974 void
975 linux_pci_disable_device(struct pci_dev *pdev)
976 {
977 const struct pci_attach_args *pa = &pdev->pd_pa;
978 pcireg_t csr;
979 int s;
980
981 if (--pdev->pd_enablecnt)
982 return;
983
984 s = splhigh();
985 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
986 csr &= ~PCI_COMMAND_IO_ENABLE;
987 csr &= ~PCI_COMMAND_MEM_ENABLE;
988 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
989 splx(s);
990 }
991
992 void
993 linux_pci_dev_destroy(struct pci_dev *pdev)
994 {
995 unsigned i;
996
997 if (pdev->bus->self != NULL) {
998 kmem_free(pdev->bus->self, sizeof(*pdev->bus->self));
999 }
1000 if (pdev->bus != NULL) {
1001 kmem_free(pdev->bus, sizeof(*pdev->bus));
1002 pdev->bus = NULL;
1003 }
1004 if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
1005 pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
1006 pdev->pd_rom_vaddr = 0;
1007 }
1008 for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
1009 if (!pdev->pd_resources[i].mapped)
1010 continue;
1011 bus_space_unmap(pdev->pd_resources[i].bst,
1012 pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
1013 }
1014
1015 /* There is no way these should be still in use. */
1016 KASSERT(pdev->pd_saved_state == NULL);
1017 KASSERT(pdev->pd_intr_handles == NULL);
1018 }
1019
/*
 * Return the fastest link speed the device supports, decoded from the
 * PCIe Link Capabilities registers; PCI_SPEED_UNKNOWN if the device
 * is not PCIe or advertises no recognized speed.
 */
enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
{
	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
	pcitag_t tag = dev->pd_pa.pa_tag;
	pcireg_t lcap, lcap2, xcap;
	int off;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return PCI_SPEED_UNKNOWN;

	/* Only PCIe 3.x has LCAP2; prefer it when present and nonzero. */
	xcap = pci_conf_read(pc, tag, off + PCIE_XCAP);
	if (__SHIFTOUT(xcap, PCIE_XCAP_VER_MASK) >= 2) {
		lcap2 = pci_conf_read(pc, tag, off + PCIE_LCAP2);
		if (lcap2) {
			/* Report the highest supported-speed bit. */
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS64) != 0) {
				return PCIE_SPEED_64_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS32) != 0) {
				return PCIE_SPEED_32_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS16) != 0) {
				return PCIE_SPEED_16_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS8) != 0) {
				return PCIE_SPEED_8_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS5) != 0) {
				return PCIE_SPEED_5_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS2) != 0) {
				return PCIE_SPEED_2_5GT;
			}
		}
	}

	/* Fall back to the Max Link Speed field of LCAP. */
	lcap = pci_conf_read(pc, tag, off + PCIE_LCAP);
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_64) {
		return PCIE_SPEED_64_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_32) {
		return PCIE_SPEED_32_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_16) {
		return PCIE_SPEED_16_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_8) {
		return PCIE_SPEED_8_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_5) {
		return PCIE_SPEED_5_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_2) {
		return PCIE_SPEED_2_5GT;
	}

	return PCI_SPEED_UNKNOWN;
}
1080
1081 /*
1082 * This should walk the tree, it only checks this device currently.
1083 * It also does not write to limiting_dev (the only caller in drm2
1084 * currently does not use it.)
1085 */
1086 unsigned
1087 pcie_bandwidth_available(struct pci_dev *dev,
1088 struct pci_dev **limiting_dev,
1089 enum pci_bus_speed *speed,
1090 enum pcie_link_width *width)
1091 {
1092 pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
1093 pcitag_t tag = dev->pd_pa.pa_tag;
1094 pcireg_t lcsr;
1095 unsigned per_line_speed, num_lanes;
1096 int off;
1097
1098 /* Must have capabilities. */
1099 if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
1100 return 0;
1101
1102 if (speed)
1103 *speed = PCI_SPEED_UNKNOWN;
1104 if (width)
1105 *width = 0;
1106
1107 lcsr = pci_conf_read(pc, tag, off + PCIE_LCSR);
1108
1109 switch (lcsr & PCIE_LCSR_NLW) {
1110 case PCIE_LCSR_NLW_X1:
1111 case PCIE_LCSR_NLW_X2:
1112 case PCIE_LCSR_NLW_X4:
1113 case PCIE_LCSR_NLW_X8:
1114 case PCIE_LCSR_NLW_X12:
1115 case PCIE_LCSR_NLW_X16:
1116 case PCIE_LCSR_NLW_X32:
1117 num_lanes = __SHIFTOUT(lcsr, PCIE_LCSR_NLW);
1118 if (width)
1119 *width = num_lanes;
1120 break;
1121 default:
1122 num_lanes = 0;
1123 break;
1124 }
1125
1126 switch (__SHIFTOUT(lcsr, PCIE_LCSR_LINKSPEED)) {
1127 case PCIE_LCSR_LINKSPEED_2:
1128 *speed = PCIE_SPEED_2_5GT;
1129 per_line_speed = 2500 * 8 / 10;
1130 break;
1131 case PCIE_LCSR_LINKSPEED_5:
1132 *speed = PCIE_SPEED_5_0GT;
1133 per_line_speed = 5000 * 8 / 10;
1134 break;
1135 case PCIE_LCSR_LINKSPEED_8:
1136 *speed = PCIE_SPEED_8_0GT;
1137 per_line_speed = 8000 * 128 / 130;
1138 break;
1139 case PCIE_LCSR_LINKSPEED_16:
1140 *speed = PCIE_SPEED_16_0GT;
1141 per_line_speed = 16000 * 128 / 130;
1142 break;
1143 case PCIE_LCSR_LINKSPEED_32:
1144 *speed = PCIE_SPEED_32_0GT;
1145 per_line_speed = 32000 * 128 / 130;
1146 break;
1147 case PCIE_LCSR_LINKSPEED_64:
1148 *speed = PCIE_SPEED_64_0GT;
1149 per_line_speed = 64000 * 128 / 130;
1150 break;
1151 default:
1152 per_line_speed = 0;
1153 }
1154
1155 return num_lanes * per_line_speed;
1156 }
1157