/*	$NetBSD: linux_pci.c,v 1.6.10.2 2020/02/29 20:20:18 ad Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "acpica.h"		/* for NACPICA, tested below */
#include "opt_pci.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.6.10.2 2020/02/29 20:20:18 ad Exp $");

#include <linux/pci.h>

#include <drm/drm_agp_netbsd.h>

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>	/* acpi_pcidev_find() */
#endif

device_t
pci_dev_dev(struct pci_dev *pdev)
{

	return pdev->pd_dev;
}

/* XXX Nouveau kludge! */
struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{

	return pdev->pd_drm_dev;
}

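/*
 * linux_pci_dev_init fills in a Linux-style pci_dev from NetBSD's
 * pci_attach_args: vendor/device/subsystem IDs, a minimal pci_bus, and
 * the address, size, type, and flags of each standard BAR.
 */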
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev));	/* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
#ifdef __HAVE_PCI_GET_SEGMENT
	const int seg = pci_get_segment(pa->pa_pc);
#else
	const int seg = 0;
#endif
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}

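/*
 * Configuration space accessors.  Linux reads and writes bytes, words,
 * and dwords; NetBSD's pci_conf_read/pci_conf_write operate only on
 * aligned 32-bit registers, so the narrower accesses shift and mask.
 */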
int
pci_find_capability(struct pci_dev *pdev, int cap)
{

	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{

	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg);
	return 0;
}

int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

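/*
 * Read-modify-write helper for the sub-dword config writes below:
 * replace the `bytes' bytes at offset `reg' within its aligned 32-bit
 * register, leaving the other bytes untouched.
 */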
static void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}

int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{

	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

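/*
 * MSI setup: allocate exactly one MSI vector into pd_intr_handles.  The
 * handle is consumed later when the driver establishes its interrupt.
 */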
int
pci_enable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
}

void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

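/*
 * Resource allocation: carve space for a memory or I/O resource out of
 * the corresponding bus space with bus_space_alloc().  The Linux-style
 * alignment callback is accepted but unused.
 */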
bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static int
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static int
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

struct pci_dev *		/* XXX i915 kludge */
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

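/*
 * pci_map_rom first tries the device's expansion ROM BAR; if mapping it
 * or finding an x86 ROM image in it fails, it falls back to the legacy
 * VGA ROM window mapped by pci_map_rom_md above.
 */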
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

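/*
 * pci_iomap maps memory BAR `i' linearly and returns its kernel virtual
 * address; pci_iounmap finds the mapping again by that address.
 */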
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c.  */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}

void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

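/*
 * pci_save_state and pci_restore_state capture and restore the whole
 * configuration space with pci_conf_capture/pci_conf_restore, using one
 * saved-state buffer hung off the pci_dev.
 */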
void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out.  */
	return false;
}

int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

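/*
 * Illustrative sketch only (the caller below is hypothetical, not part
 * of this file): a driver's attach/detach paths are expected to pair
 * the calls explicitly, e.g.
 *
 *	if (linux_pci_enable_device(pdev) != 0)
 *		goto fail;
 *	...
 *	linux_pci_disable_device(pdev);
 */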
int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

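/*
 * linux_pci_dev_destroy undoes linux_pci_dev_init: free the fake
 * pci_bus, unmap any ROM and BAR mappings, and assert that the saved
 * state and interrupt handles were already released.
 */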
void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = 0;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}