/*	$NetBSD: pci.h,v 1.22.4.1 2017/04/21 16:53:59 bouyer Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#ifdef _KERNEL_OPT
#if defined(i386) || defined(amd64)
#include "acpica.h"
#else	/* !(i386 || amd64) */
#define NACPICA	0
#endif	/* i386 || amd64 */
#endif

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/limits.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#else
struct acpi_devnode;
#endif

#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

struct pci_bus {
	u_int		number;
};

struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	unsigned long	driver_data;
};

#define	PCI_ANY_ID		((pcireg_t)-1)

#define	PCI_BASE_CLASS_DISPLAY	PCI_CLASS_DISPLAY

#define	PCI_CLASS_DISPLAY_VGA						\
	((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
#define	PCI_CLASS_BRIDGE_ISA						\
	((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);

/* XXX This is getting silly...  */
#define	PCI_VENDOR_ID_ASUSTEK	PCI_VENDOR_ASUSTEK
#define	PCI_VENDOR_ID_ATI	PCI_VENDOR_ATI
#define	PCI_VENDOR_ID_DELL	PCI_VENDOR_DELL
#define	PCI_VENDOR_ID_IBM	PCI_VENDOR_IBM
#define	PCI_VENDOR_ID_HP	PCI_VENDOR_HP
#define	PCI_VENDOR_ID_INTEL	PCI_VENDOR_INTEL
#define	PCI_VENDOR_ID_NVIDIA	PCI_VENDOR_NVIDIA
#define	PCI_VENDOR_ID_SONY	PCI_VENDOR_SONY
#define	PCI_VENDOR_ID_VIA	PCI_VENDOR_VIATECH

#define	PCI_DEVICE_ID_ATI_RADEON_QY	PCI_PRODUCT_ATI_RADEON_RV100_QY

#define	PCI_DEVFN(DEV, FN)						\
	(__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define	PCI_SLOT(DEVFN)		__SHIFTOUT((DEVFN), __BITS(3, 7))
#define	PCI_FUNC(DEVFN)		__SHIFTOUT((DEVFN), __BITS(0, 2))

#define	PCI_NUM_RESOURCES	((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define	DEVICE_COUNT_RESOURCE	PCI_NUM_RESOURCES

#define	PCI_CAP_ID_AGP	PCI_CAP_AGP

typedef int pci_power_t;

#define	PCI_D0		0
#define	PCI_D1		1
#define	PCI_D2		2
#define	PCI_D3hot	3
#define	PCI_D3cold	4

#define	__pci_iomem

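/*
 * Linux-style PCI device state, wrapping the native pci_attach_args.
 * The pd_* members are NetBSD bookkeeping; the unprefixed members
 * (bus, devfn, vendor, device, ...) mimic the Linux struct pci_dev
 * fields that the drm code reads directly.
 */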
struct pci_dev {
	struct pci_attach_args	pd_pa;
	int			pd_kludges;	/* Gotta lose 'em...  */
#define	NBPCI_KLUDGE_GET_MUMBLE	0x01
#define	NBPCI_KLUDGE_MAP_ROM	0x02
	bus_space_tag_t		pd_rom_bst;
	bus_space_handle_t	pd_rom_bsh;
	bus_size_t		pd_rom_size;
	bus_space_handle_t	pd_rom_found_bsh;
	bus_size_t		pd_rom_found_size;
	void			*pd_rom_vaddr;
	device_t		pd_dev;
	struct drm_device	*pd_drm_dev;	/* XXX Nouveau kludge!  */
	struct {
		pcireg_t		type;
		bus_addr_t		addr;
		bus_size_t		size;
		int			flags;
		bus_space_tag_t		bst;
		bus_space_handle_t	bsh;
		void __pci_iomem	*kva;
	}			pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state	*pd_saved_state;
	struct acpi_devnode	*pd_ad;
	struct pci_bus		*bus;
	uint32_t		devfn;
	uint16_t		vendor;
	uint16_t		device;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	uint8_t			revision;
	uint32_t		class;
	bool			msi_enabled;
	pci_intr_handle_t	*intr_handles;
};

static inline device_t
pci_dev_dev(struct pci_dev *pdev)
{
	return pdev->pd_dev;
}

/* XXX Nouveau kludge!  */
static inline struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drm_dev;
}

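/*
 * Fill in a struct pci_dev from the native attach arguments.  Caches
 * the vendor/device/class registers, looks up the matching ACPI device
 * node when ACPI is configured, and records the address, size, and type
 * of every BAR so the pci_resource_* and pci_iomap shims below can
 * answer queries without touching configuration space again.
 */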
static inline void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_SLEEP);
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
	}
}

static inline int
pci_find_capability(struct pci_dev *pdev, int cap)
{
	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

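/*
 * Configuration space accessors.  NetBSD's pci_conf_read/pci_conf_write
 * operate on aligned 32-bit registers only, so the word- and byte-sized
 * variants read or rewrite the containing dword and shift/mask the
 * requested piece into place.
 */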
static inline int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

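/*
 * Read-modify-write helper for sub-dword configuration writes: fetch
 * the containing 32-bit register, splice `bytes' bytes of `value' in at
 * the byte offset implied by `reg', and write the register back.
 */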
static inline void
pci_rmw_config(struct pci_dev *pdev, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	pci_read_config_dword(pdev, reg32, &value32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_write_config_dword(pdev, reg32, value32);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{
	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev, reg, 2, value);
	return 0;
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{
	pci_rmw_config(pdev, reg, 1, value);
	return 0;
}

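/*
 * Minimal MSI support: a single vector allocated with the native
 * pci_msi_alloc_exact and released with pci_intr_release.  The handle
 * is kept in pdev->intr_handles.
 */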
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
}

static inline void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->intr_handles, 1);
		pdev->intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

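/*
 * Bus-master enable/disable: set or clear PCI_COMMAND_MASTER_ENABLE in
 * the command/status register.
 */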
static inline void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

static inline void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

#define	PCIBIOS_MIN_MEM	0x100000	/* XXX bogus x86 kludge bollocks */

static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

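/*
 * Allocate bus space for `resource' with bus_space_alloc, from the
 * memory or I/O tag selected by resource->flags.  The Linux-style
 * alignment callback is unused; the alignment constraint is passed
 * straight to bus_space_alloc instead.
 */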
static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */
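/*
 * Their expected call sites look roughly like this (a sketch; these are
 * the only arguments the KASSERTs below accept):
 *
 *	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 *	...
 *	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
 *	...
 *	pci_dev_put(pch);
 *	pci_dev_put(bridge);
 */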

static inline int		/* XXX inline?  */
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static inline int		/* XXX inline?  */
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

static inline void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev, sizeof(*pdev));
}

static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

#define	__pci_rom_iomem

static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this to sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

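/*
 * Map the device's expansion ROM and return its kernel virtual address,
 * storing the image size in *sizep.  Try the ROM BAR first; if that
 * fails, fall back to the legacy 0xc0000 VGA ROM window mapped by
 * pci_map_rom_md above.  In both cases pci_find_rom is used to locate
 * an x86 code image within the mapping.
 */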
static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

static inline void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

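/*
 * Turn the ROM address decoder on or off by toggling
 * PCI_MAPREG_ROM_ENABLE in the ROM mapping register, at splhigh to keep
 * the read-modify-write atomic with respect to interrupts.
 */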
static inline int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

static inline void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

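/*
 * BAR accessors, answered from the addresses and sizes cached in
 * pd_resources by linux_pci_dev_init.
 */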
static inline bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

static inline bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

static inline bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

static inline int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

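/*
 * Map BAR i linearly with bus_space_map and return the kernel virtual
 * address; pci_iounmap looks the mapping up by that address and undoes
 * it.  Only memory BARs are supported.  If the plain mapping fails,
 * fall back to borrowing the region from the i810 fake AGP device.
 */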
static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device.  */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);

	return pdev->pd_resources[i].kva;
}

static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

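/*
 * Capture and restore the device's configuration space with the native
 * pci_conf_capture/pci_conf_restore, keeping the snapshot in a
 * heap-allocated struct pci_conf_state.
 */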
static inline void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

static inline void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

static inline bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

static inline bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

#endif	/* _LINUX_PCI_H_ */