/*	$NetBSD: pci.h,v 1.33 2018/08/27 14:11:46 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#ifdef _KERNEL_OPT
#if defined(i386) || defined(amd64)
#include "acpica.h"
#else	/* !(i386 || amd64) */
#define NACPICA	0
#endif	/* i386 || amd64 */
#endif

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/limits.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#else
struct acpi_devnode;
#endif

#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

struct pci_driver;

struct pci_bus {
	/* NetBSD private members */
	pci_chipset_tag_t	pb_pc;
	device_t		pb_dev;

	/* Linux API */
	u_int			number;
};

struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	unsigned long	driver_data;
};

#define PCI_ANY_ID		(~0)

#define PCI_BASE_CLASS_DISPLAY	PCI_CLASS_DISPLAY

#define PCI_CLASS_DISPLAY_VGA						\
	((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
#define PCI_CLASS_BRIDGE_ISA						\
	((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);

/* XXX This is getting silly... */
#define PCI_VENDOR_ID_APPLE	PCI_VENDOR_APPLE
#define PCI_VENDOR_ID_ASUSTEK	PCI_VENDOR_ASUSTEK
#define PCI_VENDOR_ID_ATI	PCI_VENDOR_ATI
#define PCI_VENDOR_ID_DELL	PCI_VENDOR_DELL
#define PCI_VENDOR_ID_IBM	PCI_VENDOR_IBM
#define PCI_VENDOR_ID_HP	PCI_VENDOR_HP
#define PCI_VENDOR_ID_INTEL	PCI_VENDOR_INTEL
#define PCI_VENDOR_ID_NVIDIA	PCI_VENDOR_NVIDIA
#define PCI_VENDOR_ID_SI	PCI_VENDOR_SIS
#define PCI_VENDOR_ID_SONY	PCI_VENDOR_SONY
#define PCI_VENDOR_ID_VIA	PCI_VENDOR_VIATECH

#define PCI_DEVICE_ID_ATI_RADEON_QY	PCI_PRODUCT_ATI_RADEON_RV100_QY

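/*
 * Linux-style devfn encoding: device number in bits 7:3, function
 * number in bits 2:0, decoded again by PCI_SLOT() and PCI_FUNC().
 */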
#define PCI_DEVFN(DEV, FN)						\
	(__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define PCI_SLOT(DEVFN)		__SHIFTOUT((DEVFN), __BITS(3, 7))
#define PCI_FUNC(DEVFN)		__SHIFTOUT((DEVFN), __BITS(0, 2))

#define PCI_NUM_RESOURCES	((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define DEVICE_COUNT_RESOURCE	PCI_NUM_RESOURCES

#define PCI_CAP_ID_AGP	PCI_CAP_AGP

typedef int pci_power_t;

#define PCI_D0		0
#define PCI_D1		1
#define PCI_D2		2
#define PCI_D3hot	3
#define PCI_D3cold	4

#define __pci_iomem

struct pci_dev {
	struct pci_attach_args	pd_pa;
	int			pd_kludges;	/* Gotta lose 'em... */
#define NBPCI_KLUDGE_GET_MUMBLE	0x01
#define NBPCI_KLUDGE_MAP_ROM	0x02
	bus_space_tag_t		pd_rom_bst;
	bus_space_handle_t	pd_rom_bsh;
	bus_size_t		pd_rom_size;
	bus_space_handle_t	pd_rom_found_bsh;
	bus_size_t		pd_rom_found_size;
	void			*pd_rom_vaddr;
	device_t		pd_dev;
	struct drm_device	*pd_drm_dev;	/* XXX Nouveau kludge! */
	struct {
		pcireg_t		type;
		bus_addr_t		addr;
		bus_size_t		size;
		int			flags;
		bus_space_tag_t		bst;
		bus_space_handle_t	bsh;
		void __pci_iomem	*kva;
	}			pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state	*pd_saved_state;
	struct acpi_devnode	*pd_ad;
	pci_intr_handle_t	*pd_intr_handles;
	unsigned		pd_enablecnt;

	/* Linux API only below */
	struct pci_bus		*bus;
	uint32_t		devfn;
	uint16_t		vendor;
	uint16_t		device;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	uint8_t			revision;
	uint32_t		class;
	bool			msi_enabled;
	bool			no_64bit_msi;
};

static inline device_t
pci_dev_dev(struct pci_dev *pdev)
{
	return pdev->pd_dev;
}

/* XXX Nouveau kludge! */
static inline struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drm_dev;
}

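/*
 * Initialize a Linux-style pci_dev from NetBSD autoconf attach
 * arguments, caching the vendor/device/class identification and
 * probing each BAR's type, address, and size up front.
 */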
static inline void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
	}
}

static inline int
pci_find_capability(struct pci_dev *pdev, int cap)
{
	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

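/*
 * Sub-dword config reads are synthesized from an aligned 32-bit read
 * of the containing register, shifted down to the requested bytes.
 */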
static inline int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

static inline int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	/* A 32-bit config read must be dword-aligned; no shift needed. */
	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg);

	return 0;
}

static inline int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));
	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

static inline int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

static inline int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));
	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

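/*
 * Read-modify-write helper for sub-dword config writes: load the
 * containing 32-bit register, splice `bytes' bytes of `value' in at
 * the byte offset implied by `reg', and write the result back.
 */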
static inline void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{
	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

static inline int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));
	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

static inline int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));
	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

static inline int
pci_enable_msi(struct pci_dev *pdev)
{
#ifdef notyet
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
#else
	return -ENOSYS;
#endif
}

static inline void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

static inline void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

static inline void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

#define PCIBIOS_MIN_MEM	0x100000	/* XXX bogus x86 kludge bollocks */

static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static inline int		/* XXX inline?  */
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static inline int		/* XXX inline?  */
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

static inline void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

#define __pci_rom_iomem

static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this to sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

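/*
 * Map the device's expansion ROM: first try the ROM BAR, and if that
 * fails (or no x86 ROM image is found in it), fall back to the legacy
 * VGA ROM window mapped by pci_map_rom_md above.
 */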
static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

static inline void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

static inline int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

static inline void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

static inline bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

static inline bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

static inline bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

static inline int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

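/*
 * Map BAR `i' into kernel virtual address space and cache the mapping
 * in pd_resources[i].  Only memory BARs are supported; borrowing the
 * mapping from the fake i810 AGP device is a last-resort fallback.
 */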
static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device.  */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);

	return pdev->pd_resources[i].kva;
}

static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

static inline void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

static inline void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

static inline bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

static inline bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

static inline bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out.  */
	return false;
}

static inline int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

static inline int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

static inline void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

#endif  /* _LINUX_PCI_H_ */