/*	$NetBSD: pci.h,v 1.10 2014/11/05 23:46:09 nonaka Exp $	*/
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifndef _LINUX_PCI_H_
33 #define _LINUX_PCI_H_
34
35 #if defined(i386) || defined(amd64)
36 #include "acpica.h"
37 #else /* !(i386 || amd64) */
38 #define NACPICA 0
39 #endif /* i386 || amd64 */
40
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/bus.h>
44 #include <sys/cdefs.h>
45 #include <sys/kmem.h>
46 #include <sys/systm.h>
47
48 #include <machine/limits.h>
49
50 #include <dev/pci/pcidevs.h>
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #include <dev/pci/agpvar.h>
54
55 #include <dev/acpi/acpivar.h>
56 #include <dev/acpi/acpi_pci.h>
57
58 #include <linux/dma-mapping.h>
59 #include <linux/ioport.h>
60
/*
 * Minimal stand-in for Linux's struct pci_bus: only the bus number is
 * carried (filled from pa_bus in linux_pci_dev_init).
 */
struct pci_bus {
	u_int		number;		/* PCI bus number */
};
64
/*
 * Linux-compatible device match entry.  Fields set to PCI_ANY_ID act as
 * wildcards; driver_data is an opaque per-entry cookie for the driver.
 */
struct pci_device_id {
	uint32_t	vendor;		/* vendor ID to match */
	uint32_t	device;		/* product ID to match */
	uint32_t	subvendor;	/* subsystem vendor ID to match */
	uint32_t	subdevice;	/* subsystem product ID to match */
	uint32_t	class;		/* class code to match */
	uint32_t	class_mask;	/* mask applied before class compare */
	unsigned long	driver_data;	/* driver-private cookie */
};
74
/* Wildcard for pci_device_id matching. */
#define PCI_ANY_ID		((pcireg_t)-1)

#define PCI_BASE_CLASS_DISPLAY	PCI_CLASS_DISPLAY

/* Linux encodes class/subclass as a 16-bit value: (class << 8) | subclass. */
#define PCI_CLASS_BRIDGE_ISA						\
	((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);

/* XXX This is getting silly...  Map Linux vendor/device names to NetBSD's. */
#define PCI_VENDOR_ID_ASUSTEK	PCI_VENDOR_ASUSTEK
#define PCI_VENDOR_ID_ATI	PCI_VENDOR_ATI
#define PCI_VENDOR_ID_DELL	PCI_VENDOR_DELL
#define PCI_VENDOR_ID_IBM	PCI_VENDOR_IBM
#define PCI_VENDOR_ID_HP	PCI_VENDOR_HP
#define PCI_VENDOR_ID_INTEL	PCI_VENDOR_INTEL
#define PCI_VENDOR_ID_NVIDIA	PCI_VENDOR_NVIDIA
#define PCI_VENDOR_ID_SONY	PCI_VENDOR_SONY
#define PCI_VENDOR_ID_VIA	PCI_VENDOR_VIATECH

#define PCI_DEVICE_ID_ATI_RADEON_QY	PCI_PRODUCT_ATI_RADEON_RV100_QY

/* Linux devfn encoding: bits 3-7 are the device (slot), bits 0-2 the function. */
#define PCI_DEVFN(DEV, FN)						\
	(__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define PCI_SLOT(DEVFN)		__SHIFTOUT((DEVFN), __BITS(3, 7))
#define PCI_FUNC(DEVFN)		__SHIFTOUT((DEVFN), __BITS(0, 2))

/* One resource per 32-bit BAR register in config space. */
#define PCI_NUM_RESOURCES	((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define DEVICE_COUNT_RESOURCE	PCI_NUM_RESOURCES

#define PCI_CAP_ID_AGP		PCI_CAP_AGP

/* PCI power-management states, Linux-style. */
typedef int pci_power_t;

#define PCI_D0		0
#define PCI_D1		1
#define PCI_D2		2
#define PCI_D3hot	3
#define PCI_D3cold	4

/* Annotation for pointers into PCI I/O memory; expands to nothing here. */
#define __pci_iomem
115
/*
 * Linux-compatible PCI device handle wrapping NetBSD's pci_attach_args.
 * Initialized by linux_pci_dev_init().
 */
struct pci_dev {
	struct pci_attach_args	pd_pa;		/* NetBSD attach arguments */
	int			pd_kludges;	/* Gotta lose 'em... */
#define	NBPCI_KLUDGE_GET_MUMBLE	0x01	/* from pci_get_bus_and_slot/class */
#define	NBPCI_KLUDGE_MAP_ROM	0x02	/* ROM currently mapped */
	bus_space_tag_t		pd_rom_bst;	/* ROM mapping tag */
	bus_space_handle_t	pd_rom_bsh;	/* ROM mapping handle */
	bus_size_t		pd_rom_size;	/* ROM mapping size */
	void			*pd_rom_vaddr;	/* KVA returned by pci_map_rom */
	device_t		pd_dev;		/* autoconf device, may be NULL */
	struct {
		pcireg_t		type;	/* BAR type from pci_mapreg_type */
		bus_addr_t		addr;	/* BAR base address (0 if invalid) */
		bus_size_t		size;	/* BAR size (0 if invalid) */
		int			flags;	/* mapping flags from pci_mapreg_info */
		bus_space_tag_t		bst;	/* tag once mapped by pci_iomap */
		bus_space_handle_t	bsh;	/* handle once mapped */
		void __pci_iomem	*kva;	/* KVA once mapped, else NULL */
	}			pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state	*pd_saved_state; /* pci_save_state buffer */
	struct acpi_devnode	*pd_ad;		/* ACPI node, may be NULL */
	struct device		dev;		/* XXX Don't believe me! */
	struct pci_bus		*bus;		/* allocated in linux_pci_dev_init */
	uint32_t		devfn;		/* PCI_DEVFN(device, function) */
	uint16_t		vendor;
	uint16_t		device;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	uint8_t			revision;
	uint32_t		class;		/* class/subclass/interface, 24 bits */
	bool			msi_enabled;
};
148
149 static inline device_t
150 pci_dev_dev(struct pci_dev *pdev)
151 {
152 return pdev->pd_dev;
153 }
154
155 static inline void
156 linux_pci_dev_init(struct pci_dev *pdev, device_t dev,
157 const struct pci_attach_args *pa, int kludges)
158 {
159 const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
160 PCI_SUBSYS_ID_REG);
161 unsigned i;
162
163 pdev->pd_pa = *pa;
164 pdev->pd_kludges = kludges;
165 pdev->pd_rom_vaddr = NULL;
166 pdev->pd_dev = dev;
167 #if (NACPICA > 0)
168 pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
169 pa->pa_device, pa->pa_function);
170 #else
171 pdev->pd_ad = NULL;
172 #endif
173 pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_NOSLEEP);
174 pdev->bus->number = pa->pa_bus;
175 pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
176 pdev->vendor = PCI_VENDOR(pa->pa_id);
177 pdev->device = PCI_PRODUCT(pa->pa_id);
178 pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
179 pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
180 pdev->revision = PCI_REVISION(pa->pa_class);
181 pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */
182
183 CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
184 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
185 const int reg = PCI_BAR(i);
186
187 pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
188 pa->pa_tag, reg);
189 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
190 pdev->pd_resources[i].type,
191 &pdev->pd_resources[i].addr,
192 &pdev->pd_resources[i].size,
193 &pdev->pd_resources[i].flags)) {
194 pdev->pd_resources[i].addr = 0;
195 pdev->pd_resources[i].size = 0;
196 pdev->pd_resources[i].flags = 0;
197 }
198 pdev->pd_resources[i].kva = NULL;
199 }
200 }
201
202 static inline int
203 pci_find_capability(struct pci_dev *pdev, int cap)
204 {
205 return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
206 NULL, NULL);
207 }
208
209 static inline int
210 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
211 {
212 KASSERT(!ISSET(reg, 3));
213 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
214 return 0;
215 }
216
217 static inline int
218 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
219 {
220 KASSERT(!ISSET(reg, 1));
221 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
222 (reg &~ 2)) >> (8 * (reg & 2));
223 return 0;
224 }
225
226 static inline int
227 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
228 {
229 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
230 (reg &~ 3)) >> (8 * (reg & 3));
231 return 0;
232 }
233
234 static inline int
235 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
236 {
237 KASSERT(!ISSET(reg, 3));
238 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
239 return 0;
240 }
241
242 static inline void
243 pci_rmw_config(struct pci_dev *pdev, int reg, unsigned int bytes,
244 uint32_t value)
245 {
246 const uint32_t mask = ~((~0UL) << (8 * bytes));
247 const int reg32 = (reg &~ 3);
248 const unsigned int shift = (8 * (reg & 3));
249 uint32_t value32;
250
251 KASSERT(bytes <= 4);
252 KASSERT(!ISSET(value, ~mask));
253 pci_read_config_dword(pdev, reg32, &value32);
254 value32 &=~ (mask << shift);
255 value32 |= (value << shift);
256 pci_write_config_dword(pdev, reg32, value32);
257 }
258
259 static inline int
260 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
261 {
262 KASSERT(!ISSET(reg, 1));
263 pci_rmw_config(pdev, reg, 2, value);
264 return 0;
265 }
266
267 static inline int
268 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
269 {
270 pci_rmw_config(pdev, reg, 1, value);
271 return 0;
272 }
273
/*
 * XXX pci msi
 */
/* MSI is not implemented in this shim; callers must fall back to INTx. */
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
	return -ENOSYS;
}
282
static inline void
pci_disable_msi(struct pci_dev *pdev __unused)
{
	/*
	 * NOTE(review): nothing visible here ever sets msi_enabled --
	 * pci_enable_msi always returns -ENOSYS -- so this assertion
	 * would fire on any call.  Confirm intended usage.
	 */
	KASSERT(pdev->msi_enabled);
}
288
289 static inline void
290 pci_set_master(struct pci_dev *pdev)
291 {
292 pcireg_t csr;
293
294 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
295 PCI_COMMAND_STATUS_REG);
296 csr |= PCI_COMMAND_MASTER_ENABLE;
297 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
298 PCI_COMMAND_STATUS_REG, csr);
299 }
300
301 static inline void
302 pci_clear_master(struct pci_dev *pdev)
303 {
304 pcireg_t csr;
305
306 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
307 PCI_COMMAND_STATUS_REG);
308 csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
309 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
310 PCI_COMMAND_STATUS_REG, csr);
311 }
312
313 #define PCIBIOS_MIN_MEM 0 /* XXX bogus x86 kludge bollocks */
314
/*
 * Stub for Linux's pcibios_align_resource: never expected to be called
 * in this compat layer, so it panics unconditionally.
 */
static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}
321
/*
 * Allocate a region of PCI memory or I/O space for `resource' using
 * bus_space_alloc.  The bus-space tag is taken from the device's attach
 * args according to resource->flags, which must be exactly
 * IORESOURCE_MEM or IORESOURCE_IO (anything else panics).
 *
 * The align_fn and type arguments from the Linux API are ignored.
 * Returns 0 on success, or the bus_space_alloc error.
 */
static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	/* Search the whole address space from `start' upward. */
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}
355
356 /*
357 * XXX Mega-kludgerific! pci_get_bus_and_slot and pci_get_class are
358 * defined only for their single purposes in i915drm, in
359 * i915_get_bridge_dev and intel_detect_pch. We can't define them more
360 * generally without adapting pci_find_device (and pci_enumerate_bus
361 * internally) to pass a cookie through.
362 */
363
364 static inline int /* XXX inline? */
365 pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
366 {
367
368 if (pa->pa_bus != 0)
369 return 0;
370 if (pa->pa_device != 0)
371 return 0;
372 if (pa->pa_function != 0)
373 return 0;
374
375 return 1;
376 }
377
/*
 * Kludge for i915_get_bridge_dev only: return a freshly allocated
 * pci_dev for bus 0, devfn 0 (asserted), or NULL if no such device.
 * The result must be released with pci_dev_put().
 */
static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	/* No autoconf device backs this handle; mark it as a kludge. */
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}
394
395 static inline int /* XXX inline? */
396 pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
397 {
398
399 if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
400 return 0;
401 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
402 return 0;
403
404 return 1;
405 }
406
407 static inline void
408 pci_dev_put(struct pci_dev *pdev)
409 {
410
411 if (pdev == NULL)
412 return;
413
414 KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
415 kmem_free(pdev, sizeof(*pdev));
416 }
417
/*
 * Kludge for intel_detect_pch only: find the (first) ISA bridge.  Only
 * the ISA-bridge class is supported (asserted).  Following Linux's
 * iterator convention, passing a non-NULL `from' releases it and ends
 * the iteration by returning NULL.  The result must be released with
 * pci_dev_put().
 */
static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	/* No autoconf device backs this handle; mark it as a kludge. */
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}
438
/* Annotation for pointers into PCI ROM memory; expands to nothing here. */
#define __pci_rom_iomem

/*
 * Undo pci_map_rom: unmap the ROM bus-space mapping recorded in pdev.
 * vaddr must be the pointer pci_map_rom returned (asserted), except
 * that pci_map_rom itself passes NULL on its error path before
 * pd_rom_vaddr is set.
 */
static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}
451
/* XXX Whattakludge!  Should move this in sys/arch/. */
/*
 * Machine-dependent fallback for pci_map_rom: on x86-family machines,
 * map the legacy VGA BIOS window at 0xc0000 (128 KB) for VGA display
 * devices.  Fills pd_rom_bst/pd_rom_bsh/pd_rom_size on success.
 * Returns 0 on success, ENXIO otherwise.
 */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;	/* legacy VGA BIOS window */
	const bus_size_t rom_size = 0x20000;	/* 128 KB */
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card? */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;

	return 0;
#else
	return ENXIO;
#endif
}
481
/*
 * Map the device's expansion ROM and locate its x86 code image.
 * Tries the ROM BAR first, then the machine-dependent legacy window
 * (pci_map_rom_md).  On success stores the image size in *sizep and
 * returns a pointer into the mapping; on failure returns NULL.
 * Release with pci_unmap_rom().
 */
static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{
	bus_space_handle_t bsh;
	bus_size_t size;

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0 &&
	    pci_map_rom_md(pdev) != 0)
		return NULL;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86, &bsh, &size)) {
		/* pd_rom_vaddr is still NULL here, matching the KASSERT. */
		pci_unmap_rom(pdev, NULL);
		return NULL;
	}

	KASSERT(size <= SIZE_T_MAX);
	*sizep = size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst, bsh);
	return pdev->pd_rom_vaddr;
}
510
511 static inline bus_addr_t
512 pci_resource_start(struct pci_dev *pdev, unsigned i)
513 {
514
515 KASSERT(i < PCI_NUM_RESOURCES);
516 return pdev->pd_resources[i].addr;
517 }
518
519 static inline bus_size_t
520 pci_resource_len(struct pci_dev *pdev, unsigned i)
521 {
522
523 KASSERT(i < PCI_NUM_RESOURCES);
524 return pdev->pd_resources[i].size;
525 }
526
527 static inline bus_addr_t
528 pci_resource_end(struct pci_dev *pdev, unsigned i)
529 {
530
531 return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
532 }
533
534 static inline int
535 pci_resource_flags(struct pci_dev *pdev, unsigned i)
536 {
537
538 KASSERT(i < PCI_NUM_RESOURCES);
539 return pdev->pd_resources[i].flags;
540 }
541
/*
 * Map `size' bytes of memory-type BAR i linearly and return its KVA,
 * or NULL on failure.  Only memory BARs are supported; the BAR must
 * be at least `size' bytes.  If the plain mapping fails, the i810 AGP
 * driver is asked to lend its existing mapping of the same range.
 */
static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device. */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	/*
	 * NOTE(review): the borrowed AGP handle is also tagged with
	 * pa_memt here -- confirm that matches agp_i810's tag.  Also,
	 * only `size' bytes were mapped, but pci_iounmap unmaps the
	 * full recorded BAR size; the mapped length is not stored.
	 */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);

	return pdev->pd_resources[i].kva;
}
569
/*
 * Undo pci_iomap: find the resource whose KVA is `kva' (asserted to
 * exist) and unmap it.
 */
static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	/*
	 * NOTE(review): this unmaps the full BAR size, but pci_iomap
	 * may have mapped fewer bytes (its `size' argument); the
	 * actually-mapped length is not recorded anywhere -- confirm.
	 */
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}
586
587 static inline void
588 pci_save_state(struct pci_dev *pdev)
589 {
590
591 KASSERT(pdev->pd_saved_state == NULL);
592 pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
593 KM_SLEEP);
594 pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
595 pdev->pd_saved_state);
596 }
597
598 static inline void
599 pci_restore_state(struct pci_dev *pdev)
600 {
601
602 KASSERT(pdev->pd_saved_state != NULL);
603 pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
604 pdev->pd_saved_state);
605 kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
606 pdev->pd_saved_state = NULL;
607 }
608
609 static inline bool
610 pci_is_pcie(struct pci_dev *pdev)
611 {
612
613 return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
614 }
615
616 static inline bool
617 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
618 {
619
620 /* XXX Cop-out. */
621 if (mask > DMA_BIT_MASK(32))
622 return pci_dma64_available(&pdev->pd_pa);
623 else
624 return true;
625 }
626
627 #endif /* _LINUX_PCI_H_ */
628