/*	$NetBSD: linux_pci.c,v 1.6.10.1 2020/01/25 22:38:50 ad Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "acpica.h"		/* NACPICA, used for the ACPI device-node lookup below */
#include "opt_pci.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.6.10.1 2020/01/25 22:38:50 ad Exp $");

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#endif

#include <linux/pci.h>

#include <drm/drm_agp_netbsd.h>

device_t
pci_dev_dev(struct pci_dev *pdev)
{

	return pdev->pd_dev;
}

/* XXX Nouveau kludge!  */
struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{

	return pdev->pd_drm_dev;
}

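/*
 * linux_pci_dev_init fills in a Linux-style pci_dev from the native
 * pci_attach_args: it records the autoconf device and parent, looks up
 * the matching ACPI device node when ACPI is configured, allocates a
 * minimal pci_bus, and caches each BAR's type, address, size, and flags
 * so that pci_resource_* and pci_iomap below need not touch the device
 * again.
 */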
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
#ifdef __HAVE_PCI_GET_SEGMENT
	const int seg = pci_get_segment(pa->pa_pc);
#else
	const int seg = 0;
#endif
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}

int
pci_find_capability(struct pci_dev *pdev, int cap)
{

	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

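/*
 * NetBSD config space accessors work a 32-bit register at a time, so
 * the narrower read routines fetch the containing dword and shift the
 * requested word or byte down into the low bits.
 */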
int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{

	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg);
	return 0;
}

int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

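/*
 * pci_rmw_config implements sub-dword config writes as a
 * read-modify-write: read the containing dword, clear the byte lanes
 * selected by `bytes' and the register offset, merge in the new value,
 * and write the dword back.
 */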
static void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}

int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{

	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

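/*
 * MSI support is not wired up yet: pci_enable_msi always fails with
 * -ENOSYS, and pci_disable_msi only releases any interrupt handles
 * that happen to have been allocated.
 */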
int
pci_enable_msi(struct pci_dev *pdev)
{
#ifdef notyet
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
#else
	return -ENOSYS;
#endif
}

void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

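/*
 * pci_bus_alloc_resource carves `size' bytes, aligned to `align', out
 * of the memory or I/O bus space selected by resource->flags, starting
 * the search at `start', and records the resulting tag, handle, start
 * address, and size in the resource.
 */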
int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static int
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static int
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

struct pci_dev *		/* XXX i915 kludge */
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

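/*
 * pci_map_rom first tries to map the device's expansion ROM BAR and
 * locate an x86 code image in it; failing that, it falls back to the
 * legacy VGA ROM window at 0xc0000 via pci_map_rom_md on x86-class
 * machines.  pci_unmap_rom undoes whichever mapping succeeded.
 */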
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

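/*
 * pci_iomap maps memory BAR `i' linearly and returns its kernel
 * virtual address; the mapping is remembered in pd_resources[i] so
 * that pci_iounmap can find it again from the KVA alone.
 */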
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c.  */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}

void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

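/*
 * pci_save_state and pci_restore_state capture and restore the
 * device's configuration space (as Linux drivers expect around
 * suspend/resume), using pci_conf_capture/pci_conf_restore on a
 * heap-allocated snapshot.
 */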
void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out.  */
	return false;
}

int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = NULL;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}