/*	$NetBSD: linux_pci.c,v 1.15 2021/12/19 09:49:39 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "acpica.h"
#include "opt_pci.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.15 2021/12/19 09:49:39 riastradh Exp $");

#include <sys/types.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#endif

#include <linux/pci.h>

#include <drm/drm_agp_netbsd.h>

device_t
pci_dev_dev(struct pci_dev *pdev)
{

	return pdev->pd_dev;
}

void
pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
{
	pdev->pd_drvdata = drvdata;
}

void *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drvdata;
}

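/*
 * linux_pci_dev_init --
 *
 *	Fill in a Linux-style struct pci_dev from a native NetBSD
 *	pci_attach_args, so that drm code written against the Linux PCI
 *	API can operate on the device.  `kludges' selects special-case
 *	handling for devices obtained via pci_get_class &c. rather than
 *	via autoconf attachment.
 */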
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev));	/* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
#ifdef __HAVE_PCI_GET_SEGMENT
	const int seg = pci_get_segment(pa->pa_pc);
#else
	const int seg = 0;
#endif
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->pd_drvdata = NULL;
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}

int
pci_find_capability(struct pci_dev *pdev, int cap)
{

	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

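/*
 * Config space accessors.  pci_conf_read/write operate on whole 32-bit
 * registers, so the word- and byte-sized variants below read the
 * containing dword and shift the requested field into place.
 *
 * Illustrative use (hypothetical caller reading the device ID at
 * offset 2 of the ID register):
 *
 *	uint16_t devid;
 *	pci_read_config_word(pdev, PCI_ID_REG + 2, &devid);
 */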
int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{

	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

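/*
 * Sub-dword config writes cannot be issued directly, so pci_rmw_config
 * reads the containing 32-bit register, substitutes the `bytes'-wide
 * field at `reg', and writes the whole register back.
 */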
static void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}

int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{

	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

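/*
 * MSI: pci_enable_msi allocates exactly one MSI vector with
 * pci_msi_alloc_exact and records the handle in pd_intr_handles;
 * pci_disable_msi releases it again.
 */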
int
pci_enable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
}

void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

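/*
 * Bus mastering is controlled by the Bus Master Enable bit in the PCI
 * command/status register; these helpers just set or clear that bit.
 */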
void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

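/*
 * Resource allocation.  pci_bus_alloc_resource carves a region of the
 * requested size and alignment out of the device's memory or I/O space
 * with bus_space_alloc; the Linux-style alignment callback (and
 * pcibios_align_resource, its only would-be implementation here) is
 * never actually invoked.
 */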
bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->end = resource->start + (size - 1);
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static int
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	/* XXX domain */
	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

struct pci_dev *
pci_get_domain_bus_and_slot(int domain, int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(domain == 0);
	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static int
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

struct pci_dev *		/* XXX i915 kludge */
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

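/*
 * pci_map_rom first tries to map the device's expansion ROM BAR and to
 * locate an x86 code image in it with pci_find_rom; if either step
 * fails it falls back to pci_map_rom_md's fixed legacy VGA ROM window.
 */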
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

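/*
 * The expansion ROM address decoder is gated by the enable bit in the
 * ROM base address register; pci_enable_rom and pci_disable_rom toggle
 * that bit at splhigh so the read-modify-write is not interrupted.
 */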
int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

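/*
 * pci_iomap maps BAR i (memory space only) with BUS_SPACE_MAP_LINEAR
 * and returns the kernel virtual address of the mapping; pci_iounmap
 * finds the mapping by that address and undoes it.
 *
 * Illustrative use (hypothetical BAR index and size):
 *
 *	void __pci_iomem *regs = pci_iomap(pdev, 0,
 *	    pci_resource_len(pdev, 0));
 */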
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c.  */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}

void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

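/*
 * pci_save_state and pci_restore_state capture and restore the device's
 * configuration space with the native pci_conf_capture/pci_conf_restore,
 * keeping the snapshot in pd_saved_state.
 */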
void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
{

	/* XXX Cop-out.  */
	return false;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out.  */
	return false;
}

int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = 0;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}

bool
dev_is_pci(struct pci_dev *pdev)
{
	return pdev != NULL;
}