/*	$NetBSD: pci.h,v 1.36 2018/08/27 14:16:04 riastradh Exp $	*/
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifndef _LINUX_PCI_H_
33 #define _LINUX_PCI_H_
34
35 #ifdef _KERNEL_OPT
36 #if defined(i386) || defined(amd64)
37 #include "acpica.h"
38 #else /* !(i386 || amd64) */
39 #define NACPICA 0
40 #endif /* i386 || amd64 */
41 #endif
42
43 #include <sys/types.h>
44 #include <sys/param.h>
45 #include <sys/bus.h>
46 #include <sys/cdefs.h>
47 #include <sys/kmem.h>
48 #include <sys/systm.h>
49
50 #include <machine/limits.h>
51
52 #include <dev/pci/pcidevs.h>
53 #include <dev/pci/pcireg.h>
54 #include <dev/pci/pcivar.h>
55 #include <dev/pci/agpvar.h>
56
57 #if NACPICA > 0
58 #include <dev/acpi/acpivar.h>
59 #include <dev/acpi/acpi_pci.h>
60 #else
61 struct acpi_devnode;
62 #endif
63
64 #include <linux/dma-mapping.h>
65 #include <linux/ioport.h>
66 #include <linux/kernel.h>
67
68 struct pci_driver;
69
/*
 * Minimal Linux `struct pci_bus' shim: enough state to reconstruct
 * PCI tags and to expose the bus number to Linux-derived code.
 */
struct pci_bus {
	/* NetBSD private members */
	pci_chipset_tag_t pb_pc;	/* chipset tag for config-space access */
	device_t pb_dev;		/* parent device; NULL for kludge lookups */

	/* Linux API */
	u_int number;			/* PCI bus number */
};
78
/*
 * Linux-style device match entry.  ID fields may be PCI_ANY_ID to act
 * as wildcards (Linux convention; matching code is not in this file).
 */
struct pci_device_id {
	uint32_t vendor;		/* vendor ID or PCI_ANY_ID */
	uint32_t device;		/* product ID or PCI_ANY_ID */
	uint32_t subvendor;		/* subsystem vendor ID or PCI_ANY_ID */
	uint32_t subdevice;		/* subsystem product ID or PCI_ANY_ID */
	uint32_t class;			/* class code, presumably tested under class_mask */
	uint32_t class_mask;		/* mask applied to class when matching */
	unsigned long driver_data;	/* opaque cookie for the driver */
};
88
89 #define PCI_ANY_ID (~0)
90
91 #define PCI_BASE_CLASS_DISPLAY PCI_CLASS_DISPLAY
92
93 #define PCI_CLASS_DISPLAY_VGA \
94 ((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
95 #define PCI_CLASS_BRIDGE_ISA \
96 ((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
97 CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);
98
99 /* XXX This is getting silly... */
100 #define PCI_VENDOR_ID_APPLE PCI_VENDOR_APPLE
101 #define PCI_VENDOR_ID_ASUSTEK PCI_VENDOR_ASUSTEK
102 #define PCI_VENDOR_ID_ATI PCI_VENDOR_ATI
103 #define PCI_VENDOR_ID_DELL PCI_VENDOR_DELL
104 #define PCI_VENDOR_ID_IBM PCI_VENDOR_IBM
105 #define PCI_VENDOR_ID_HP PCI_VENDOR_HP
106 #define PCI_VENDOR_ID_INTEL PCI_VENDOR_INTEL
107 #define PCI_VENDOR_ID_NVIDIA PCI_VENDOR_NVIDIA
108 #define PCI_VENDOR_ID_SI PCI_VENDOR_SIS
109 #define PCI_VENDOR_ID_SONY PCI_VENDOR_SONY
110 #define PCI_VENDOR_ID_VIA PCI_VENDOR_VIATECH
111
112 #define PCI_DEVICE_ID_ATI_RADEON_QY PCI_PRODUCT_ATI_RADEON_RV100_QY
113
114 #define PCI_DEVFN(DEV, FN) \
115 (__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
116 #define PCI_SLOT(DEVFN) __SHIFTOUT((DEVFN), __BITS(3, 7))
117 #define PCI_FUNC(DEVFN) __SHIFTOUT((DEVFN), __BITS(0, 2))
118
119 #define PCI_NUM_RESOURCES ((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
120 #define DEVICE_COUNT_RESOURCE PCI_NUM_RESOURCES
121
122 #define PCI_CAP_ID_AGP PCI_CAP_AGP
123
124 typedef int pci_power_t;
125
126 #define PCI_D0 0
127 #define PCI_D1 1
128 #define PCI_D2 2
129 #define PCI_D3hot 3
130 #define PCI_D3cold 4
131
132 #define __pci_iomem
133
/*
 * Linux `struct pci_dev' shim.  The pd_* members are NetBSD-private
 * bookkeeping; the unprefixed members mimic the Linux API fields that
 * drm code reads directly.
 */
struct pci_dev {
	struct pci_attach_args pd_pa;	/* copy of the autoconf attach args */
	int pd_kludges;	/* Gotta lose 'em... */
#define	NBPCI_KLUDGE_GET_MUMBLE	0x01	/* created by pci_get_*; freed via pci_dev_put */
#define	NBPCI_KLUDGE_MAP_ROM	0x02	/* expansion ROM currently mapped */
	bus_space_tag_t pd_rom_bst;	/* ROM mapping: tag */
	bus_space_handle_t pd_rom_bsh;	/* ROM mapping: handle */
	bus_size_t pd_rom_size;		/* ROM mapping: size of whole window */
	bus_space_handle_t pd_rom_found_bsh;	/* subregion of the image found */
	bus_size_t pd_rom_found_size;		/* size of the image found */
	void *pd_rom_vaddr;		/* KVA returned by pci_map_rom */
	device_t pd_dev;		/* NetBSD device, if attached */
	struct drm_device *pd_drm_dev;	/* XXX Nouveau kludge! */
	struct {			/* per-BAR resource bookkeeping */
		pcireg_t type;		/* mapreg type (mem/io/etc.) */
		bus_addr_t addr;	/* BAR base address (0 if absent) */
		bus_size_t size;	/* BAR size (0 if absent) */
		int flags;		/* bus_space map flags from pci_mapreg_info */
		bus_space_tag_t bst;	/* tag used when mapped */
		bus_space_handle_t bsh;	/* handle when mapped */
		void __pci_iomem *kva;	/* KVA when mapped, else NULL */
		bool mapped;		/* true after pci_iomap succeeds */
	} pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state *pd_saved_state;	/* pci_save_state buffer */
	struct acpi_devnode *pd_ad;	/* matching ACPI node, or NULL */
	pci_intr_handle_t *pd_intr_handles;	/* MSI handles, or NULL */
	unsigned pd_enablecnt;		/* linux_pci_enable_device nesting */

	/* Linux API only below */
	struct pci_bus *bus;		/* fake bus, allocated per device */
	uint32_t devfn;			/* PCI_DEVFN(device, function) */
	uint16_t vendor;		/* PCI vendor ID */
	uint16_t device;		/* PCI product ID */
	uint16_t subsystem_vendor;	/* subsystem vendor ID */
	uint16_t subsystem_device;	/* subsystem product ID */
	uint8_t revision;		/* revision ID */
	uint32_t class;			/* class/subclass/interface, 24 bits */
	bool msi_enabled;		/* MSI currently enabled */
	bool no_64bit_msi;		/* restrict MSI to 32-bit addresses */
};
174
175 static inline device_t
176 pci_dev_dev(struct pci_dev *pdev)
177 {
178 return pdev->pd_dev;
179 }
180
181 /* XXX Nouveau kludge! */
182 static inline struct drm_device *
183 pci_get_drvdata(struct pci_dev *pdev)
184 {
185 return pdev->pd_drm_dev;
186 }
187
188 static inline void
189 linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
190 const struct pci_attach_args *pa, int kludges)
191 {
192 const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
193 PCI_SUBSYS_ID_REG);
194 unsigned i;
195
196 pdev->pd_pa = *pa;
197 pdev->pd_kludges = kludges;
198 pdev->pd_rom_vaddr = NULL;
199 pdev->pd_dev = dev;
200 #if (NACPICA > 0)
201 pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
202 pa->pa_device, pa->pa_function);
203 #else
204 pdev->pd_ad = NULL;
205 #endif
206 pdev->pd_saved_state = NULL;
207 pdev->pd_intr_handles = NULL;
208 pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
209 pdev->bus->pb_pc = pa->pa_pc;
210 pdev->bus->pb_dev = parent;
211 pdev->bus->number = pa->pa_bus;
212 pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
213 pdev->vendor = PCI_VENDOR(pa->pa_id);
214 pdev->device = PCI_PRODUCT(pa->pa_id);
215 pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
216 pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
217 pdev->revision = PCI_REVISION(pa->pa_class);
218 pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */
219
220 CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
221 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
222 const int reg = PCI_BAR(i);
223
224 pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
225 pa->pa_tag, reg);
226 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
227 pdev->pd_resources[i].type,
228 &pdev->pd_resources[i].addr,
229 &pdev->pd_resources[i].size,
230 &pdev->pd_resources[i].flags)) {
231 pdev->pd_resources[i].addr = 0;
232 pdev->pd_resources[i].size = 0;
233 pdev->pd_resources[i].flags = 0;
234 }
235 pdev->pd_resources[i].kva = NULL;
236 }
237 }
238
239 static inline int
240 pci_find_capability(struct pci_dev *pdev, int cap)
241 {
242 return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
243 NULL, NULL);
244 }
245
246 static inline int
247 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
248 {
249 KASSERT(!ISSET(reg, 3));
250 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
251 return 0;
252 }
253
254 static inline int
255 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
256 {
257 KASSERT(!ISSET(reg, 1));
258 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
259 (reg &~ 2)) >> (8 * (reg & 2));
260 return 0;
261 }
262
263 static inline int
264 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
265 {
266 *valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
267 (reg &~ 3)) >> (8 * (reg & 3));
268 return 0;
269 }
270
271 static inline int
272 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
273 {
274 KASSERT(!ISSET(reg, 3));
275 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
276 return 0;
277 }
278
279 static inline int
280 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
281 uint32_t *valuep)
282 {
283 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
284 PCI_FUNC(devfn));
285
286 KASSERT(!ISSET(reg, 1));
287 *valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
288
289 return 0;
290 }
291
292 static inline int
293 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
294 uint16_t *valuep)
295 {
296 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
297 PCI_FUNC(devfn));
298 KASSERT(!ISSET(reg, 1));
299 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
300 return 0;
301 }
302
303 static inline int
304 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
305 uint8_t *valuep)
306 {
307 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
308 PCI_FUNC(devfn));
309 *valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
310 return 0;
311 }
312
313 static inline int
314 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
315 uint32_t value)
316 {
317 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
318 PCI_FUNC(devfn));
319 KASSERT(!ISSET(reg, 3));
320 pci_conf_write(bus->pb_pc, tag, reg, value);
321 return 0;
322 }
323
324 static inline void
325 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
326 uint32_t value)
327 {
328 const uint32_t mask = ~((~0UL) << (8 * bytes));
329 const int reg32 = (reg &~ 3);
330 const unsigned int shift = (8 * (reg & 3));
331 uint32_t value32;
332
333 KASSERT(bytes <= 4);
334 KASSERT(!ISSET(value, ~mask));
335 value32 = pci_conf_read(pc, tag, reg32);
336 value32 &=~ (mask << shift);
337 value32 |= (value << shift);
338 pci_conf_write(pc, tag, reg32, value32);
339 }
340
341 static inline int
342 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
343 {
344 KASSERT(!ISSET(reg, 1));
345 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
346 return 0;
347 }
348
349 static inline int
350 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
351 {
352 pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
353 return 0;
354 }
355
356 static inline int
357 pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
358 uint16_t value)
359 {
360 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
361 PCI_FUNC(devfn));
362 KASSERT(!ISSET(reg, 1));
363 pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
364 return 0;
365 }
366
367 static inline int
368 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
369 uint8_t value)
370 {
371 pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
372 PCI_FUNC(devfn));
373 pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
374 return 0;
375 }
376
/*
 * Enable MSI for the device.  The real implementation is compiled
 * out ("notyet"): it would allocate exactly one MSI handle with
 * pci_msi_alloc_exact() and set msi_enabled.  Until then this always
 * returns -ENOSYS, so callers fall back to legacy INTx interrupts.
 */
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
#ifdef notyet
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
#else
	return -ENOSYS;
#endif
}
392
393 static inline void
394 pci_disable_msi(struct pci_dev *pdev __unused)
395 {
396 const struct pci_attach_args *const pa = &pdev->pd_pa;
397
398 if (pdev->pd_intr_handles != NULL) {
399 pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
400 pdev->pd_intr_handles = NULL;
401 }
402 pdev->msi_enabled = 0;
403 }
404
405 static inline void
406 pci_set_master(struct pci_dev *pdev)
407 {
408 pcireg_t csr;
409
410 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
411 PCI_COMMAND_STATUS_REG);
412 csr |= PCI_COMMAND_MASTER_ENABLE;
413 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
414 PCI_COMMAND_STATUS_REG, csr);
415 }
416
417 static inline void
418 pci_clear_master(struct pci_dev *pdev)
419 {
420 pcireg_t csr;
421
422 csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
423 PCI_COMMAND_STATUS_REG);
424 csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
425 pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
426 PCI_COMMAND_STATUS_REG, csr);
427 }
428
429 #define PCIBIOS_MIN_MEM 0x100000 /* XXX bogus x86 kludge bollocks */
430
431 static inline bus_addr_t
432 pcibios_align_resource(void *p, const struct resource *resource,
433 bus_addr_t addr, bus_size_t size)
434 {
435 panic("pcibios_align_resource has accessed unaligned neurons!");
436 }
437
438 static inline int
439 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
440 bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
441 bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
442 bus_size_t) __unused,
443 struct pci_dev *pdev)
444 {
445 const struct pci_attach_args *const pa = &pdev->pd_pa;
446 bus_space_tag_t bst;
447 int error;
448
449 switch (resource->flags) {
450 case IORESOURCE_MEM:
451 bst = pa->pa_memt;
452 break;
453
454 case IORESOURCE_IO:
455 bst = pa->pa_iot;
456 break;
457
458 default:
459 panic("I don't know what kind of resource you want!");
460 }
461
462 resource->r_bst = bst;
463 error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
464 size, align, 0, 0, &resource->start, &resource->r_bsh);
465 if (error)
466 return error;
467
468 resource->size = size;
469 return 0;
470 }
471
472 /*
473 * XXX Mega-kludgerific! pci_get_bus_and_slot and pci_get_class are
474 * defined only for their single purposes in i915drm, in
475 * i915_get_bridge_dev and intel_detect_pch. We can't define them more
476 * generally without adapting pci_find_device (and pci_enumerate_bus
477 * internally) to pass a cookie through.
478 */
479
480 static inline int /* XXX inline? */
481 pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
482 {
483
484 if (pa->pa_bus != 0)
485 return 0;
486 if (pa->pa_device != 0)
487 return 0;
488 if (pa->pa_function != 0)
489 return 0;
490
491 return 1;
492 }
493
494 static inline struct pci_dev *
495 pci_get_bus_and_slot(int bus, int slot)
496 {
497 struct pci_attach_args pa;
498
499 KASSERT(bus == 0);
500 KASSERT(slot == PCI_DEVFN(0, 0));
501
502 if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
503 return NULL;
504
505 struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
506 linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
507
508 return pdev;
509 }
510
511 static inline int /* XXX inline? */
512 pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
513 {
514
515 if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
516 return 0;
517 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
518 return 0;
519
520 return 1;
521 }
522
523 static inline void
524 pci_dev_put(struct pci_dev *pdev)
525 {
526
527 if (pdev == NULL)
528 return;
529
530 KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
531 kmem_free(pdev->bus, sizeof(*pdev->bus));
532 kmem_free(pdev, sizeof(*pdev));
533 }
534
535 static inline struct pci_dev *
536 pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
537 {
538 struct pci_attach_args pa;
539
540 KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));
541
542 if (from != NULL) {
543 pci_dev_put(from);
544 return NULL;
545 }
546
547 if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
548 return NULL;
549
550 struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
551 linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
552
553 return pdev;
554 }
555
556 #define __pci_rom_iomem
557
558 static inline void
559 pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
560 {
561
562 /* XXX Disable the ROM address decoder. */
563 KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
564 KASSERT(vaddr == pdev->pd_rom_vaddr);
565 bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
566 pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
567 pdev->pd_rom_vaddr = NULL;
568 }
569
570 /* XXX Whattakludge! Should move this in sys/arch/. */
/* XXX Whattakludge!  Should move this in sys/arch/. */
/*
 * Fallback ROM access: map the legacy VGA BIOS window
 * (0xc0000..0xdffff) on x86-family machines when the device's
 * expansion ROM BAR cannot be mapped.  Only attempted for VGA
 * display devices.  On success fills in the pd_rom_* state, sets
 * NBPCI_KLUDGE_MAP_ROM, and returns 0; otherwise returns ENXIO.
 */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;	/* legacy VGA BIOS window */
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	/* Only a VGA display device can own the legacy window. */
	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card? */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	/* No legacy VGA window on this architecture. */
	return ENXIO;
#endif
}
600
/*
 * Map the device's expansion ROM and return a pointer to the x86
 * code image found inside it, storing the image size in *sizep;
 * returns NULL on failure.
 *
 * Strategy: first map the ROM BAR with pci_mapreg_map(); if that
 * fails, or pci_find_rom() finds no x86 image in it, fall back to
 * the machine-dependent legacy VGA window (pci_map_rom_md()) and
 * search that instead.
 */
static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		/* No image in the BAR mapping: undo it, try the MD way. */
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general... */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}
645
646 static inline void __pci_rom_iomem *
647 pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
648 {
649
650 *sizep = 0;
651 return NULL;
652 }
653
654 static inline int
655 pci_enable_rom(struct pci_dev *pdev)
656 {
657 const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
658 const pcitag_t tag = pdev->pd_pa.pa_tag;
659 pcireg_t addr;
660 int s;
661
662 /* XXX Don't do anything if the ROM isn't there. */
663
664 s = splhigh();
665 addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
666 addr |= PCI_MAPREG_ROM_ENABLE;
667 pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
668 splx(s);
669
670 return 0;
671 }
672
673 static inline void
674 pci_disable_rom(struct pci_dev *pdev)
675 {
676 const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
677 const pcitag_t tag = pdev->pd_pa.pa_tag;
678 pcireg_t addr;
679 int s;
680
681 s = splhigh();
682 addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
683 addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
684 pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
685 splx(s);
686 }
687
688 static inline bus_addr_t
689 pci_resource_start(struct pci_dev *pdev, unsigned i)
690 {
691
692 KASSERT(i < PCI_NUM_RESOURCES);
693 return pdev->pd_resources[i].addr;
694 }
695
696 static inline bus_size_t
697 pci_resource_len(struct pci_dev *pdev, unsigned i)
698 {
699
700 KASSERT(i < PCI_NUM_RESOURCES);
701 return pdev->pd_resources[i].size;
702 }
703
704 static inline bus_addr_t
705 pci_resource_end(struct pci_dev *pdev, unsigned i)
706 {
707
708 return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
709 }
710
711 static inline int
712 pci_resource_flags(struct pci_dev *pdev, unsigned i)
713 {
714
715 KASSERT(i < PCI_NUM_RESOURCES);
716 return pdev->pd_resources[i].flags;
717 }
718
/*
 * Map `size' bytes of BAR i (memory space only) and return the
 * kernel virtual address, or NULL on failure.  Fails if the BAR is
 * not a memory mapping or is smaller than the requested size.  If
 * bus_space_map() fails, falls back to borrowing the mapping from
 * the fake i810 AGP device.
 */
static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device. */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	/*
	 * NOTE(review): in the agp_i810_borrow path the handle comes
	 * from the AGP driver, yet bst is still set to pa_memt and
	 * `mapped' to true, so pci_iounmap()/destroy will
	 * bus_space_unmap() a borrowed handle -- confirm this is
	 * intended.
	 */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}
747
748 static inline void
749 pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
750 {
751 unsigned i;
752
753 CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
754 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
755 if (pdev->pd_resources[i].kva == kva)
756 break;
757 }
758 KASSERT(i < PCI_NUM_RESOURCES);
759
760 pdev->pd_resources[i].kva = NULL;
761 bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
762 pdev->pd_resources[i].size);
763 }
764
765 static inline void
766 pci_save_state(struct pci_dev *pdev)
767 {
768
769 KASSERT(pdev->pd_saved_state == NULL);
770 pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
771 KM_SLEEP);
772 pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
773 pdev->pd_saved_state);
774 }
775
776 static inline void
777 pci_restore_state(struct pci_dev *pdev)
778 {
779
780 KASSERT(pdev->pd_saved_state != NULL);
781 pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
782 pdev->pd_saved_state);
783 kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
784 pdev->pd_saved_state = NULL;
785 }
786
787 static inline bool
788 pci_is_pcie(struct pci_dev *pdev)
789 {
790
791 return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
792 }
793
794 static inline bool
795 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
796 {
797
798 /* XXX Cop-out. */
799 if (mask > DMA_BIT_MASK(32))
800 return pci_dma64_available(&pdev->pd_pa);
801 else
802 return true;
803 }
804
805 static inline bool
806 pci_is_root_bus(struct pci_bus *bus)
807 {
808
809 /* XXX Cop-out. */
810 return false;
811 }
812
813 static inline int
814 pci_domain_nr(struct pci_bus *bus)
815 {
816
817 return device_unit(bus->pb_dev);
818 }
819
820 /*
821 * We explicitly rename pci_enable/disable_device so that you have to
822 * review each use of them, since NetBSD's PCI API does _not_ respect
823 * our local enablecnt here, but there are different parts of NetBSD
824 * that automatically enable/disable like PMF, so you have to decide
825 * for each one whether to call it or not.
826 */
827
828 static inline int
829 linux_pci_enable_device(struct pci_dev *pdev)
830 {
831 const struct pci_attach_args *pa = &pdev->pd_pa;
832 pcireg_t csr;
833 int s;
834
835 if (pdev->pd_enablecnt++)
836 return 0;
837
838 s = splhigh();
839 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
840 csr |= PCI_COMMAND_IO_ENABLE;
841 csr |= PCI_COMMAND_MEM_ENABLE;
842 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
843 splx(s);
844
845 return 0;
846 }
847
848 static inline void
849 linux_pci_disable_device(struct pci_dev *pdev)
850 {
851 const struct pci_attach_args *pa = &pdev->pd_pa;
852 pcireg_t csr;
853 int s;
854
855 if (--pdev->pd_enablecnt)
856 return;
857
858 s = splhigh();
859 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
860 csr &= ~PCI_COMMAND_IO_ENABLE;
861 csr &= ~PCI_COMMAND_MEM_ENABLE;
862 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
863 splx(s);
864 }
865
/*
 * Tear down state created by linux_pci_dev_init(): free the fake
 * pci_bus, unmap a mapped expansion ROM, and unmap any BARs still
 * mapped by pci_iomap().  Does not free pdev itself -- the caller
 * owns that allocation.
 */
static inline void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = 0;
	}
	/* Unmap any BARs that pci_iomap() mapped and nobody unmapped. */
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use. */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}
890
891 #endif /* _LINUX_PCI_H_ */
892