linux_pci.c revision 1.19 1 /* $NetBSD: linux_pci.c,v 1.19 2021/12/19 11:38:04 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifdef _KERNEL_OPT
33 #include "acpica.h"
34 #include "opt_pci.h"
35 #endif
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.19 2021/12/19 11:38:04 riastradh Exp $");
39
40 #if NACPICA > 0
41 #include <dev/acpi/acpivar.h>
42 #include <dev/acpi/acpi_pci.h>
43 #endif
44
45 #include <linux/pci.h>
46
47 #include <drm/drm_agp_netbsd.h>
48
49 device_t
50 pci_dev_dev(struct pci_dev *pdev)
51 {
52
53 return pdev->pd_dev;
54 }
55
56 void
57 pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
58 {
59 pdev->pd_drvdata = drvdata;
60 }
61
62 void *
63 pci_get_drvdata(struct pci_dev *pdev)
64 {
65 return pdev->pd_drvdata;
66 }
67
68 void
69 linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
70 const struct pci_attach_args *pa, int kludges)
71 {
72 const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
73 PCI_SUBSYS_ID_REG);
74 unsigned i;
75
76 memset(pdev, 0, sizeof(*pdev)); /* paranoia */
77
78 pdev->pd_pa = *pa;
79 pdev->pd_kludges = kludges;
80 pdev->pd_rom_vaddr = NULL;
81 pdev->pd_dev = dev;
82 #if (NACPICA > 0)
83 #ifdef __HAVE_PCI_GET_SEGMENT
84 const int seg = pci_get_segment(pa->pa_pc);
85 #else
86 const int seg = 0;
87 #endif
88 pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
89 pa->pa_device, pa->pa_function);
90 #else
91 pdev->pd_ad = NULL;
92 #endif
93 pdev->pd_saved_state = NULL;
94 pdev->pd_intr_handles = NULL;
95 pdev->pd_drvdata = NULL;
96 pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
97 pdev->bus->pb_pc = pa->pa_pc;
98 pdev->bus->pb_dev = parent;
99 pdev->bus->number = pa->pa_bus;
100 pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
101 pdev->vendor = PCI_VENDOR(pa->pa_id);
102 pdev->device = PCI_PRODUCT(pa->pa_id);
103 pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
104 pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
105 pdev->revision = PCI_REVISION(pa->pa_class);
106 pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */
107
108 CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
109 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
110 const int reg = PCI_BAR(i);
111
112 pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
113 pa->pa_tag, reg);
114 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
115 pdev->pd_resources[i].type,
116 &pdev->pd_resources[i].addr,
117 &pdev->pd_resources[i].size,
118 &pdev->pd_resources[i].flags)) {
119 pdev->pd_resources[i].addr = 0;
120 pdev->pd_resources[i].size = 0;
121 pdev->pd_resources[i].flags = 0;
122 }
123 pdev->pd_resources[i].kva = NULL;
124 pdev->pd_resources[i].mapped = false;
125 }
126 }
127
128 int
129 pci_find_capability(struct pci_dev *pdev, int cap)
130 {
131
132 return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
133 NULL, NULL);
134 }
135
136 int
137 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
138 {
139
140 KASSERT(!ISSET(reg, 3));
141 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
142 return 0;
143 }
144
145 int
146 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
147 {
148
149 KASSERT(!ISSET(reg, 1));
150 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
151 (reg &~ 2)) >> (8 * (reg & 2));
152 return 0;
153 }
154
155 int
156 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
157 {
158
159 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
160 (reg &~ 3)) >> (8 * (reg & 3));
161 return 0;
162 }
163
164 int
165 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
166 {
167
168 KASSERT(!ISSET(reg, 3));
169 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
170 return 0;
171 }
172
173 int
174 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
175 uint32_t *valuep)
176 {
177 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
178 PCI_FUNC(devfn));
179
180 KASSERT(!ISSET(reg, 1));
181 *valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
182 return 0;
183 }
184
185 int
186 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
187 uint16_t *valuep)
188 {
189 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
190 PCI_FUNC(devfn));
191
192 KASSERT(!ISSET(reg, 1));
193 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
194 return 0;
195 }
196
197 int
198 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
199 uint8_t *valuep)
200 {
201 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
202 PCI_FUNC(devfn));
203
204 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
205 return 0;
206 }
207
208 int
209 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
210 uint32_t value)
211 {
212 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
213 PCI_FUNC(devfn));
214
215 KASSERT(!ISSET(reg, 3));
216 pci_conf_write(bus->pb_pc, tag, reg, value);
217 return 0;
218 }
219
220 static void
221 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
222 uint32_t value)
223 {
224 const uint32_t mask = ~((~0UL) << (8 * bytes));
225 const int reg32 = (reg &~ 3);
226 const unsigned int shift = (8 * (reg & 3));
227 uint32_t value32;
228
229 KASSERT(bytes <= 4);
230 KASSERT(!ISSET(value, ~mask));
231 value32 = pci_conf_read(pc, tag, reg32);
232 value32 &=~ (mask << shift);
233 value32 |= (value << shift);
234 pci_conf_write(pc, tag, reg32, value32);
235 }
236
237 int
238 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
239 {
240
241 KASSERT(!ISSET(reg, 1));
242 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
243 return 0;
244 }
245
246 int
247 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
248 {
249
250 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
251 return 0;
252 }
253
254 int
255 pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
256 uint16_t value)
257 {
258 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
259 PCI_FUNC(devfn));
260
261 KASSERT(!ISSET(reg, 1));
262 pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
263 return 0;
264 }
265
266 int
267 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
268 uint8_t value)
269 {
270 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
271 PCI_FUNC(devfn));
272
273 pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
274 return 0;
275 }
276
277 int
278 pci_enable_msi(struct pci_dev *pdev)
279 {
280 const struct pci_attach_args *const pa = &pdev->pd_pa;
281
282 if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
283 return -EINVAL;
284
285 pdev->msi_enabled = 1;
286 return 0;
287 }
288
289 void
290 pci_disable_msi(struct pci_dev *pdev __unused)
291 {
292 const struct pci_attach_args *const pa = &pdev->pd_pa;
293
294 if (pdev->pd_intr_handles != NULL) {
295 pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
296 pdev->pd_intr_handles = NULL;
297 }
298 pdev->msi_enabled = 0;
299 }
300
301 void
302 pci_set_master(struct pci_dev *pdev)
303 {
304 pcireg_t csr;
305
306 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
307 PCI_COMMAND_STATUS_REG);
308 csr |= PCI_COMMAND_MASTER_ENABLE;
309 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
310 PCI_COMMAND_STATUS_REG, csr);
311 }
312
313 void
314 pci_clear_master(struct pci_dev *pdev)
315 {
316 pcireg_t csr;
317
318 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
319 PCI_COMMAND_STATUS_REG);
320 csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
321 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
322 PCI_COMMAND_STATUS_REG, csr);
323 }
324
325 bus_addr_t
326 pcibios_align_resource(void *p, const struct resource *resource,
327 bus_addr_t addr, bus_size_t size)
328 {
329 panic("pcibios_align_resource has accessed unaligned neurons!");
330 }
331
332 int
333 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
334 bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
335 bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
336 bus_size_t) __unused,
337 struct pci_dev *pdev)
338 {
339 const struct pci_attach_args *const pa = &pdev->pd_pa;
340 bus_space_tag_t bst;
341 int error;
342
343 switch (resource->flags) {
344 case IORESOURCE_MEM:
345 bst = pa->pa_memt;
346 break;
347
348 case IORESOURCE_IO:
349 bst = pa->pa_iot;
350 break;
351
352 default:
353 panic("I don't know what kind of resource you want!");
354 }
355
356 resource->r_bst = bst;
357 error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
358 size, align, 0, 0, &resource->start, &resource->r_bsh);
359 if (error)
360 return error;
361
362 resource->end = start + (size - 1);
363 return 0;
364 }
365
366 /*
367 * XXX Mega-kludgerific! pci_get_bus_and_slot and pci_get_class are
368 * defined only for their single purposes in i915drm, in
369 * i915_get_bridge_dev and intel_detect_pch. We can't define them more
370 * generally without adapting pci_find_device (and pci_enumerate_bus
371 * internally) to pass a cookie through.
372 */
373
374 static int
375 pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
376 {
377
378 /* XXX domain */
379 if (pa->pa_bus != 0)
380 return 0;
381 if (pa->pa_device != 0)
382 return 0;
383 if (pa->pa_function != 0)
384 return 0;
385
386 return 1;
387 }
388
389 struct pci_dev *
390 pci_get_domain_bus_and_slot(int domain, int bus, int slot)
391 {
392 struct pci_attach_args pa;
393
394 KASSERT(domain == 0);
395 KASSERT(bus == 0);
396 KASSERT(slot == PCI_DEVFN(0, 0));
397
398 if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
399 return NULL;
400
401 struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
402 linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
403
404 return pdev;
405 }
406
407 static int
408 pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
409 {
410
411 if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
412 return 0;
413 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
414 return 0;
415
416 return 1;
417 }
418
419 void
420 pci_dev_put(struct pci_dev *pdev)
421 {
422
423 if (pdev == NULL)
424 return;
425
426 KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
427 kmem_free(pdev->bus, sizeof(*pdev->bus));
428 kmem_free(pdev, sizeof(*pdev));
429 }
430
431 struct pci_dev * /* XXX i915 kludge */
432 pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
433 {
434 struct pci_attach_args pa;
435
436 KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));
437
438 if (from != NULL) {
439 pci_dev_put(from);
440 return NULL;
441 }
442
443 if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
444 return NULL;
445
446 struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
447 linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
448
449 return pdev;
450 }
451
/*
 * Report whether any device matching `ids' is present.  Unimplemented
 * (always "no"): pci_find_device cannot carry the id table through
 * its match callback.
 */
int
pci_dev_present(const struct pci_device_id *ids)
{

	/* XXX implement me -- pci_find_device doesn't pass a cookie */
	return 0;
}
459
460 void
461 pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
462 {
463
464 /* XXX Disable the ROM address decoder. */
465 KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
466 KASSERT(vaddr == pdev->pd_rom_vaddr);
467 bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
468 pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
469 pdev->pd_rom_vaddr = NULL;
470 }
471
/* XXX Whattakludge!  Should move this in sys/arch/. */
/*
 * Fallback ROM mapping for x86-family machines: when the expansion
 * ROM BAR cannot be mapped, map the legacy VGA BIOS shadow window at
 * 0xc0000 instead -- but only for VGA display devices.  Fills in
 * pd_rom_bst/pd_rom_bsh/pd_rom_size and sets NBPCI_KLUDGE_MAP_ROM on
 * success.  Returns 0 on success, ENXIO otherwise (always ENXIO on
 * non-x86).
 */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	/* Legacy VGA option-ROM shadow: 0xc0000..0xdffff. */
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card? */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}
502
/*
 * Map the device's expansion ROM and locate an x86 code image in it.
 * Two-stage strategy: first try the machine-independent path via the
 * ROM BAR; if mapping or image search fails, fall back to the
 * machine-dependent VGA-shadow kludge (pci_map_rom_md).  On success,
 * stores the image size in *sizep and returns a KVA pointer (also
 * cached in pd_rom_vaddr); returns NULL on failure.
 */
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	/* Stage 1: map the ROM BAR. */
	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		/* No usable image: unwind the mapping before fallback. */
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	/* Stage 2: machine-dependent VGA-shadow fallback. */
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}
547
548 void __pci_rom_iomem *
549 pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
550 {
551
552 *sizep = 0;
553 return NULL;
554 }
555
556 int
557 pci_enable_rom(struct pci_dev *pdev)
558 {
559 const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
560 const pcitag_t tag = pdev->pd_pa.pa_tag;
561 pcireg_t addr;
562 int s;
563
564 /* XXX Don't do anything if the ROM isn't there. */
565
566 s = splhigh();
567 addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
568 addr |= PCI_MAPREG_ROM_ENABLE;
569 pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
570 splx(s);
571
572 return 0;
573 }
574
575 void
576 pci_disable_rom(struct pci_dev *pdev)
577 {
578 const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
579 const pcitag_t tag = pdev->pd_pa.pa_tag;
580 pcireg_t addr;
581 int s;
582
583 s = splhigh();
584 addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
585 addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
586 pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
587 splx(s);
588 }
589
590 bus_addr_t
591 pci_resource_start(struct pci_dev *pdev, unsigned i)
592 {
593
594 KASSERT(i < PCI_NUM_RESOURCES);
595 return pdev->pd_resources[i].addr;
596 }
597
598 bus_size_t
599 pci_resource_len(struct pci_dev *pdev, unsigned i)
600 {
601
602 KASSERT(i < PCI_NUM_RESOURCES);
603 return pdev->pd_resources[i].size;
604 }
605
606 bus_addr_t
607 pci_resource_end(struct pci_dev *pdev, unsigned i)
608 {
609
610 return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
611 }
612
613 int
614 pci_resource_flags(struct pci_dev *pdev, unsigned i)
615 {
616
617 KASSERT(i < PCI_NUM_RESOURCES);
618 return pdev->pd_resources[i].flags;
619 }
620
/*
 * Map the first `size' bytes of memory BAR i linearly and return a
 * kernel virtual address for it, or NULL on failure.  Only memory
 * BARs are supported, and `size' must not exceed the BAR.  The
 * mapping is recorded in pd_resources[i] for pci_iounmap and
 * linux_pci_dev_destroy.
 */
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c. */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}
646
647 void
648 pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
649 {
650 unsigned i;
651
652 CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
653 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
654 if (pdev->pd_resources[i].kva == kva)
655 break;
656 }
657 KASSERT(i < PCI_NUM_RESOURCES);
658
659 pdev->pd_resources[i].kva = NULL;
660 bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
661 pdev->pd_resources[i].size);
662 }
663
664 void
665 pci_save_state(struct pci_dev *pdev)
666 {
667
668 KASSERT(pdev->pd_saved_state == NULL);
669 pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
670 KM_SLEEP);
671 pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
672 pdev->pd_saved_state);
673 }
674
675 void
676 pci_restore_state(struct pci_dev *pdev)
677 {
678
679 KASSERT(pdev->pd_saved_state != NULL);
680 pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
681 pdev->pd_saved_state);
682 kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
683 pdev->pd_saved_state = NULL;
684 }
685
686 bool
687 pci_is_pcie(struct pci_dev *pdev)
688 {
689
690 return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
691 }
692
693 bool
694 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
695 {
696
697 /* XXX Cop-out. */
698 if (mask > DMA_BIT_MASK(32))
699 return pci_dma64_available(&pdev->pd_pa);
700 else
701 return true;
702 }
703
704 bool
705 pci_is_thunderbolt_attached(struct pci_dev *pdev)
706 {
707
708 /* XXX Cop-out. */
709 return false;
710 }
711
712 bool
713 pci_is_root_bus(struct pci_bus *bus)
714 {
715
716 /* XXX Cop-out. */
717 return false;
718 }
719
720 int
721 pci_domain_nr(struct pci_bus *bus)
722 {
723
724 return device_unit(bus->pb_dev);
725 }
726
727 /*
728 * We explicitly rename pci_enable/disable_device so that you have to
729 * review each use of them, since NetBSD's PCI API does _not_ respect
730 * our local enablecnt here, but there are different parts of NetBSD
731 * that automatically enable/disable like PMF, so you have to decide
732 * for each one whether to call it or not.
733 */
734
/*
 * Enable I/O and memory decoding, with Linux-style nesting via
 * pd_enablecnt.  Only the first enable touches the hardware.  If the
 * firmware had already enabled decoding, an extra count is credited
 * so that the matching disable path does not turn the device off
 * behind the firmware's back.  Always returns 0.
 */
int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	/* Nested enable: just bump the count. */
	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them. */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}
757
/*
 * Counterpart to linux_pci_enable_device: decrement pd_enablecnt and
 * disable I/O/memory decoding only when the count reaches zero (which
 * never happens if firmware was credited with an extra enable).
 */
void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	/* Still enabled by someone else: leave the hardware alone. */
	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}
775
776 void
777 linux_pci_dev_destroy(struct pci_dev *pdev)
778 {
779 unsigned i;
780
781 if (pdev->bus != NULL) {
782 kmem_free(pdev->bus, sizeof(*pdev->bus));
783 pdev->bus = NULL;
784 }
785 if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
786 pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
787 pdev->pd_rom_vaddr = 0;
788 }
789 for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
790 if (!pdev->pd_resources[i].mapped)
791 continue;
792 bus_space_unmap(pdev->pd_resources[i].bst,
793 pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
794 }
795
796 /* There is no way these should be still in use. */
797 KASSERT(pdev->pd_saved_state == NULL);
798 KASSERT(pdev->pd_intr_handles == NULL);
799 }
800