      1 /*	$NetBSD: linux_pci.c,v 1.25 2022/10/17 03:05:32 mrg Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifdef _KERNEL_OPT
     33 #include "acpica.h"
     34 #include "opt_pci.h"
     35 #endif
     36 
     37 #include <sys/cdefs.h>
     38 __KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.25 2022/10/17 03:05:32 mrg Exp $");
     39 
     40 #if NACPICA > 0
     41 #include <dev/acpi/acpivar.h>
     42 #include <dev/acpi/acpi_pci.h>
     43 #endif
     44 
     45 #include <linux/pci.h>
     46 
     47 #include <drm/drm_agp_netbsd.h>
     48 
     49 device_t
     50 pci_dev_dev(struct pci_dev *pdev)
     51 {
     52 
     53 	return pdev->pd_dev;
     54 }
     55 
     56 void
     57 pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
     58 {
     59 	pdev->pd_drvdata = drvdata;
     60 }
     61 
     62 void *
     63 pci_get_drvdata(struct pci_dev *pdev)
     64 {
     65 	return pdev->pd_drvdata;
     66 }
     67 
     68 const char *
     69 pci_name(struct pci_dev *pdev)
     70 {
     71 
     72 	/* XXX not sure this has the right format */
     73 	return device_xname(pci_dev_dev(pdev));
     74 }
     75 
     76 /*
      77  * Set up enough of a parent that we can access config space.
     78  * This is gross and grovels pci(4) and ppb(4) internals.
     79  */
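         /*
          * The result is hung off pdev->bus->self by linux_pci_dev_init()
          * and freed again in linux_pci_dev_destroy().
          */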
     80 static struct pci_dev *
     81 alloc_fake_parent_device(device_t parent, const struct pci_attach_args *pa)
     82 {
     83 
     84 	if (parent == NULL || !device_is_a(parent, "pci"))
     85 		return NULL;
     86 
     87 	device_t pparent = device_parent(parent);
     88 	if (pparent == NULL || !device_is_a(pparent, "ppb"))
     89 		return NULL;
     90 
     91 	struct pci_softc *pcisc = device_private(parent);
     92 	struct ppb_softc *ppbsc = device_private(pparent);
     93 
     94 	struct pci_dev *parentdev = kmem_zalloc(sizeof(*parentdev), KM_SLEEP);
     95 
     96 	/* Copy this device's pci_attach_args{} as a base-line. */
     97 	struct pci_attach_args *npa = &parentdev->pd_pa;
     98 	*npa = *pa;
     99 
    100 	/* Now update with stuff found in parent. */
    101 	npa->pa_iot = pcisc->sc_iot;
    102 	npa->pa_memt = pcisc->sc_memt;
    103 	npa->pa_dmat = pcisc->sc_dmat;
    104 	npa->pa_dmat64 = pcisc->sc_dmat64;
    105 	npa->pa_pc = pcisc->sc_pc;
    106 	npa->pa_flags = 0;	/* XXX? */
    107 
    108 	/* Copy the parent tag, and read some info about it. */
    109 	npa->pa_tag = ppbsc->sc_tag;
    110 	pcireg_t id = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_ID_REG);
    111 	pcireg_t subid = pci_conf_read(npa->pa_pc, npa->pa_tag,
    112 	    PCI_SUBSYS_ID_REG);
    113 	pcireg_t class = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_CLASS_REG);
    114 
    115 	/*
    116 	 * Fill in as much of pci_attach_args and pci_dev as reasonably possible.
    117 	 * Most of this is not used currently.
    118 	 */
    119 	int bus, device, function;
    120 	pci_decompose_tag(npa->pa_pc, npa->pa_tag, &bus, &device, &function);
    121 	npa->pa_device = device;
    122 	npa->pa_function = function;
    123 	npa->pa_bus = bus;
    124 	npa->pa_id = id;
    125 	npa->pa_class = class;
    126 	npa->pa_intrswiz = pcisc->sc_intrswiz;
    127 	npa->pa_intrtag = pcisc->sc_intrtag;
    128 	npa->pa_intrpin = PCI_INTERRUPT_PIN_NONE;
    129 
    130 	parentdev->pd_dev = parent;
    131 
    132 	parentdev->bus = NULL;
     133 	parentdev->devfn = PCI_DEVFN(device, function);
    134 	parentdev->vendor = PCI_VENDOR(id);
    135 	parentdev->device = PCI_PRODUCT(id);
    136 	parentdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subid);
    137 	parentdev->subsystem_device = PCI_SUBSYS_ID(subid);
    138 	parentdev->revision = PCI_REVISION(class);
    139 	parentdev->class = __SHIFTOUT(class, 0xffffff00UL); /* ? */
    140 
    141 	return parentdev;
    142 }
    143 
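         /*
          * Initialize a Linux-style pci_dev from NetBSD pci_attach_args: copy
          * the attach args, look up the matching ACPI device node if ACPICA is
          * configured, allocate a minimal pci_bus, and record the BARs in
          * pd_resources.  A fake parent bridge is created for bus->self when a
          * real pci(4)/ppb(4) parent can be found.
          */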
    144 void
    145 linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    146     const struct pci_attach_args *pa, int kludges)
    147 {
    148 	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
    149 	    PCI_SUBSYS_ID_REG);
    150 	unsigned i;
    151 
    152 	memset(pdev, 0, sizeof(*pdev)); /* paranoia */
    153 
    154 	pdev->pd_pa = *pa;
    155 	pdev->pd_kludges = kludges;
    156 	pdev->pd_rom_vaddr = NULL;
    157 	pdev->pd_dev = dev;
    158 #if (NACPICA > 0)
    159 	const int seg = pci_get_segment(pa->pa_pc);
    160 	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
    161 	    pa->pa_device, pa->pa_function);
    162 #else
    163 	pdev->pd_ad = NULL;
    164 #endif
    165 	pdev->pd_saved_state = NULL;
    166 	pdev->pd_intr_handles = NULL;
    167 	pdev->pd_drvdata = NULL;
     168 	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_SLEEP);
    169 	pdev->bus->pb_pc = pa->pa_pc;
    170 	pdev->bus->pb_dev = parent;
    171 	pdev->bus->number = pa->pa_bus;
    172 	pdev->bus->self = alloc_fake_parent_device(parent, pa);
    173 	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
    174 	pdev->vendor = PCI_VENDOR(pa->pa_id);
    175 	pdev->device = PCI_PRODUCT(pa->pa_id);
    176 	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
    177 	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
    178 	pdev->revision = PCI_REVISION(pa->pa_class);
    179 	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */
    180 
    181 	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
    182 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
    183 		const int reg = PCI_BAR(i);
    184 
    185 		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
    186 		    pa->pa_tag, reg);
    187 		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
    188 			pdev->pd_resources[i].type,
    189 			&pdev->pd_resources[i].addr,
    190 			&pdev->pd_resources[i].size,
    191 			&pdev->pd_resources[i].flags)) {
    192 			pdev->pd_resources[i].addr = 0;
    193 			pdev->pd_resources[i].size = 0;
    194 			pdev->pd_resources[i].flags = 0;
    195 		}
    196 		pdev->pd_resources[i].kva = NULL;
    197 		pdev->pd_resources[i].mapped = false;
    198 	}
    199 }
    200 
    201 int
    202 pci_find_capability(struct pci_dev *pdev, int cap)
    203 {
    204 
    205 	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
    206 	    NULL, NULL);
    207 }
    208 
    209 int
    210 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
    211 {
    212 
    213 	KASSERT(!ISSET(reg, 3));
    214 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
    215 	return 0;
    216 }
    217 
    218 int
    219 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
    220 {
    221 
    222 	KASSERT(!ISSET(reg, 1));
    223 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    224 	    (reg &~ 2)) >> (8 * (reg & 2));
    225 	return 0;
    226 }
    227 
    228 int
    229 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
    230 {
    231 
    232 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    233 	    (reg &~ 3)) >> (8 * (reg & 3));
    234 	return 0;
    235 }
    236 
    237 int
    238 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
    239 {
    240 
    241 	KASSERT(!ISSET(reg, 3));
    242 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
    243 	return 0;
    244 }
    245 
    246 int
    247 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    248     uint32_t *valuep)
    249 {
    250 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    251 	    PCI_FUNC(devfn));
    252 
     253 	KASSERT(!ISSET(reg, 3));
     254 	*valuep = pci_conf_read(bus->pb_pc, tag, reg);
    255 	return 0;
    256 }
    257 
    258 int
    259 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    260     uint16_t *valuep)
    261 {
    262 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    263 	    PCI_FUNC(devfn));
    264 
    265 	KASSERT(!ISSET(reg, 1));
    266 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
    267 	return 0;
    268 }
    269 
    270 int
    271 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    272     uint8_t *valuep)
    273 {
    274 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    275 	    PCI_FUNC(devfn));
    276 
    277 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
    278 	return 0;
    279 }
    280 
    281 int
    282 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    283     uint32_t value)
    284 {
    285 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    286 	    PCI_FUNC(devfn));
    287 
    288 	KASSERT(!ISSET(reg, 3));
    289 	pci_conf_write(bus->pb_pc, tag, reg, value);
    290 	return 0;
    291 }
    292 
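         /*
          * Read-modify-write helper for sub-dword config writes: read the
          * aligned 32-bit register containing reg, replace the `bytes'-wide
          * field at reg's offset within it with value, and write it back.
          */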
    293 static void
    294 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    295     uint32_t value)
    296 {
    297 	const uint32_t mask = ~((~0UL) << (8 * bytes));
    298 	const int reg32 = (reg &~ 3);
    299 	const unsigned int shift = (8 * (reg & 3));
    300 	uint32_t value32;
    301 
    302 	KASSERT(bytes <= 4);
    303 	KASSERT(!ISSET(value, ~mask));
    304 	value32 = pci_conf_read(pc, tag, reg32);
    305 	value32 &=~ (mask << shift);
    306 	value32 |= (value << shift);
    307 	pci_conf_write(pc, tag, reg32, value32);
    308 }
    309 
    310 int
    311 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
    312 {
    313 
    314 	KASSERT(!ISSET(reg, 1));
    315 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
    316 	return 0;
    317 }
    318 
    319 int
    320 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
    321 {
    322 
    323 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
    324 	return 0;
    325 }
    326 
    327 int
    328 pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    329     uint16_t value)
    330 {
    331 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    332 	    PCI_FUNC(devfn));
    333 
    334 	KASSERT(!ISSET(reg, 1));
    335 	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
    336 	return 0;
    337 }
    338 
    339 int
    340 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    341     uint8_t value)
    342 {
    343 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
    344 	    PCI_FUNC(devfn));
    345 
    346 	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
    347 	return 0;
    348 }
    349 
    350 int
    351 pci_enable_msi(struct pci_dev *pdev)
    352 {
    353 	const struct pci_attach_args *const pa = &pdev->pd_pa;
    354 
    355 	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
    356 		return -EINVAL;
    357 
    358 	pdev->msi_enabled = 1;
    359 	return 0;
    360 }
    361 
    362 void
     363 pci_disable_msi(struct pci_dev *pdev)
    364 {
    365 	const struct pci_attach_args *const pa = &pdev->pd_pa;
    366 
    367 	if (pdev->pd_intr_handles != NULL) {
    368 		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
    369 		pdev->pd_intr_handles = NULL;
    370 	}
    371 	pdev->msi_enabled = 0;
    372 }
    373 
    374 void
    375 pci_set_master(struct pci_dev *pdev)
    376 {
    377 	pcireg_t csr;
    378 
    379 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    380 	    PCI_COMMAND_STATUS_REG);
    381 	csr |= PCI_COMMAND_MASTER_ENABLE;
    382 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    383 	    PCI_COMMAND_STATUS_REG, csr);
    384 }
    385 
    386 void
    387 pci_clear_master(struct pci_dev *pdev)
    388 {
    389 	pcireg_t csr;
    390 
    391 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    392 	    PCI_COMMAND_STATUS_REG);
    393 	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
    394 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    395 	    PCI_COMMAND_STATUS_REG, csr);
    396 }
    397 
    398 int
    399 pcie_capability_read_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
    400 {
    401 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    402 	pcitag_t tag = pdev->pd_pa.pa_tag;
    403 	int off;
    404 
    405 	*valuep = 0;
    406 
    407 	/* Must have capabilities. */
    408 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    409 		return 1;
    410 
    411 	*valuep = pci_conf_read(pc, tag, off + reg);
    412 
    413 	return 0;
    414 }
    415 
    416 int
    417 pcie_capability_read_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
    418 {
    419 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    420 	pcitag_t tag = pdev->pd_pa.pa_tag;
    421 	int off;
    422 
    423 	*valuep = 0;
    424 
    425 	/* Must have capabilities. */
    426 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    427 		return 1;
    428 
    429 	*valuep = pci_conf_read(pc, tag, off + (reg &~ 2)) >> (8 * (reg & 2));
    430 
    431 	return 0;
    432 }
    433 
    434 int
    435 pcie_capability_write_dword(struct pci_dev *pdev, int reg, uint32_t value)
    436 {
    437 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    438 	pcitag_t tag = pdev->pd_pa.pa_tag;
    439 	int off;
    440 
    441 	/* Must have capabilities. */
    442 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    443 		return 1;
    444 
    445 	pci_conf_write(pc, tag, off + reg, value);
    446 
    447 	return 0;
    448 }
    449 
    450 int
    451 pcie_capability_write_word(struct pci_dev *pdev, int reg, uint16_t value)
    452 {
    453 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    454 	pcitag_t tag = pdev->pd_pa.pa_tag;
    455 	int off;
    456 
    457 	/* Must have capabilities. */
    458 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    459 		return 1;
    460 
    461 	pci_rmw_config(pc, tag, off + reg, 2, value);
    462 
    463 	return 0;
    464 }
    465 
    466 /* From PCIe 5.0 7.5.3.4 "Device Control Register" */
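         /* A Max_Read_Request_Size field value of n selects 128 << n bytes. */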
    467 static const unsigned readrqmax[] = {
    468 	128,
    469 	256,
    470 	512,
    471 	1024,
    472 	2048,
    473 	4096,
    474 };
    475 
    476 int
    477 pcie_get_readrq(struct pci_dev *pdev)
    478 {
    479 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    480 	pcitag_t tag = pdev->pd_pa.pa_tag;
    481 	unsigned val;
    482 	int off;
    483 
    484 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    485 		return -EINVAL; /* XXX NetBSD->Linux */
    486 
    487 	val = __SHIFTOUT(pci_conf_read(pc, tag, off + PCIE_DCSR),
    488 	    PCIE_DCSR_MAX_READ_REQ);
    489 
    490 	if (val >= __arraycount(readrqmax))
    491 		val = 0;
    492 	return readrqmax[val];
    493 }
    494 
    495 int
    496 pcie_set_readrq(struct pci_dev *pdev, int val)
    497 {
    498 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    499 	pcitag_t tag = pdev->pd_pa.pa_tag;
    500 	pcireg_t reg, newval = 0;
    501 	unsigned i;
    502 	int off;
    503 
    504 	for (i = 0; i < __arraycount(readrqmax); i++) {
    505 		if (readrqmax[i] == val) {
    506 			newval = i;
    507 			break;
    508 		}
    509 	}
    510 
    511 	if (i == __arraycount(readrqmax))
    512 		return -EINVAL;
    513 
    514 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
    515 		return -EINVAL; /* XXX NetBSD->Linux */
    516 
    517 	reg = pci_conf_read(pc, tag, off + PCIE_DCSR);
     518 	reg = (reg & ~PCIE_DCSR_MAX_READ_REQ) | (newval << 12);
    519 	pci_conf_write(pc, tag, off + PCIE_DCSR, reg);
    520 
    521 	return 0;
    522 }
    523 
    524 bus_addr_t
    525 pcibios_align_resource(void *p, const struct resource *resource,
    526     bus_addr_t addr, bus_size_t size)
    527 {
    528 	panic("pcibios_align_resource has accessed unaligned neurons!");
    529 }
    530 
    531 int
    532 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    533     bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    534     bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
    535 	bus_size_t) __unused,
    536     struct pci_dev *pdev)
    537 {
    538 	const struct pci_attach_args *const pa = &pdev->pd_pa;
    539 	bus_space_tag_t bst;
    540 	int error;
    541 
    542 	switch (resource->flags) {
    543 	case IORESOURCE_MEM:
    544 		bst = pa->pa_memt;
    545 		break;
    546 
    547 	case IORESOURCE_IO:
    548 		bst = pa->pa_iot;
    549 		break;
    550 
    551 	default:
    552 		panic("I don't know what kind of resource you want!");
    553 	}
    554 
    555 	resource->r_bst = bst;
    556 	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
    557 	    size, align, 0, 0, &resource->start, &resource->r_bsh);
    558 	if (error)
    559 		return error;
    560 
     561 	resource->end = resource->start + (size - 1);
    562 	return 0;
    563 }
    564 
    565 /*
     566  * XXX Mega-kludgerific!  pci_get_domain_bus_and_slot and pci_get_class
    567  * defined only for their single purposes in i915drm, in
    568  * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
    569  * generally without adapting pci_find_device (and pci_enumerate_bus
    570  * internally) to pass a cookie through.
    571  */
    572 
    573 static int
    574 pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
    575 {
    576 
    577 	/* XXX domain */
    578 	if (pa->pa_bus != 0)
    579 		return 0;
    580 	if (pa->pa_device != 0)
    581 		return 0;
    582 	if (pa->pa_function != 0)
    583 		return 0;
    584 
    585 	return 1;
    586 }
    587 
    588 struct pci_dev *
    589 pci_get_domain_bus_and_slot(int domain, int bus, int slot)
    590 {
    591 	struct pci_attach_args pa;
    592 
    593 	KASSERT(domain == 0);
    594 	KASSERT(bus == 0);
    595 	KASSERT(slot == PCI_DEVFN(0, 0));
    596 
    597 	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
    598 		return NULL;
    599 
    600 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
    601 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
    602 
    603 	return pdev;
    604 }
    605 
    606 static int
    607 pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
    608 {
    609 
    610 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
    611 		return 0;
    612 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
    613 		return 0;
    614 
    615 	return 1;
    616 }
    617 
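         /*
          * Release a pci_dev obtained from pci_get_domain_bus_and_slot() or
          * pci_get_class(); devices set up by linux_pci_dev_init() for a real
          * autoconf attachment are torn down with linux_pci_dev_destroy()
          * instead.
          */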
    618 void
    619 pci_dev_put(struct pci_dev *pdev)
    620 {
    621 
    622 	if (pdev == NULL)
    623 		return;
    624 
    625 	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
    626 	kmem_free(pdev->bus, sizeof(*pdev->bus));
    627 	kmem_free(pdev, sizeof(*pdev));
    628 }
    629 
    630 struct pci_dev *		/* XXX i915 kludge */
    631 pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
    632 {
    633 	struct pci_attach_args pa;
    634 
    635 	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));
    636 
    637 	if (from != NULL) {
    638 		pci_dev_put(from);
    639 		return NULL;
    640 	}
    641 
    642 	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
    643 		return NULL;
    644 
    645 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
    646 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
    647 
    648 	return pdev;
    649 }
    650 
    651 int
    652 pci_dev_present(const struct pci_device_id *ids)
    653 {
    654 
    655 	/* XXX implement me -- pci_find_device doesn't pass a cookie */
    656 	return 0;
    657 }
    658 
    659 void
    660 pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
    661 {
    662 
    663 	/* XXX Disable the ROM address decoder.  */
    664 	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
    665 	KASSERT(vaddr == pdev->pd_rom_vaddr);
    666 	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
    667 	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
    668 	pdev->pd_rom_vaddr = NULL;
    669 }
    670 
     671 /* XXX Whattakludge!  Should move this to sys/arch/.  */
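         /*
          * Fallback for devices whose expansion ROM BAR cannot be mapped: on
          * x86-style ports, map the legacy VGA BIOS window at 0xc0000 for VGA
          * display devices.
          */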
    672 static int
    673 pci_map_rom_md(struct pci_dev *pdev)
    674 {
    675 #if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
    676 	const bus_addr_t rom_base = 0xc0000;
    677 	const bus_size_t rom_size = 0x20000;
    678 	bus_space_handle_t rom_bsh;
    679 	int error;
    680 
    681 	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
    682 		return ENXIO;
    683 	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
    684 		return ENXIO;
    685 	/* XXX Check whether this is the primary VGA card?  */
    686 	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
    687 	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
    688 	if (error)
    689 		return ENXIO;
    690 
    691 	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
    692 	pdev->pd_rom_bsh = rom_bsh;
    693 	pdev->pd_rom_size = rom_size;
    694 	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;
    695 
    696 	return 0;
    697 #else
    698 	return ENXIO;
    699 #endif
    700 }
    701 
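         /*
          * Map the device's expansion ROM and locate an x86 BIOS image in it;
          * if that fails, fall back to the legacy VGA BIOS window via
          * pci_map_rom_md().  On success, return a kernel virtual address for
          * the image and store its size in *sizep; return NULL on failure.
          */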
    702 void __pci_rom_iomem *
    703 pci_map_rom(struct pci_dev *pdev, size_t *sizep)
    704 {
    705 
    706 	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
    707 
    708 	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
    709 		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
    710 		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
    711 	    != 0)
    712 		goto fail_mi;
    713 	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;
    714 
    715 	/* XXX This type is obviously wrong in general...  */
    716 	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
    717 		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
    718 		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
    719 		pci_unmap_rom(pdev, NULL);
    720 		goto fail_mi;
    721 	}
    722 	goto success;
    723 
    724 fail_mi:
    725 	if (pci_map_rom_md(pdev) != 0)
    726 		goto fail_md;
    727 
    728 	/* XXX This type is obviously wrong in general...  */
    729 	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
    730 		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
    731 		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
    732 		pci_unmap_rom(pdev, NULL);
    733 		goto fail_md;
    734 	}
    735 
    736 success:
    737 	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
    738 	*sizep = pdev->pd_rom_found_size;
    739 	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
    740 	    pdev->pd_rom_found_bsh);
    741 	return pdev->pd_rom_vaddr;
    742 
    743 fail_md:
    744 	return NULL;
    745 }
    746 
    747 void __pci_rom_iomem *
    748 pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
    749 {
    750 
    751 	*sizep = 0;
    752 	return NULL;
    753 }
    754 
    755 int
    756 pci_enable_rom(struct pci_dev *pdev)
    757 {
    758 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    759 	const pcitag_t tag = pdev->pd_pa.pa_tag;
    760 	pcireg_t addr;
    761 	int s;
    762 
    763 	/* XXX Don't do anything if the ROM isn't there.  */
    764 
    765 	s = splhigh();
    766 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
    767 	addr |= PCI_MAPREG_ROM_ENABLE;
    768 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
    769 	splx(s);
    770 
    771 	return 0;
    772 }
    773 
    774 void
    775 pci_disable_rom(struct pci_dev *pdev)
    776 {
    777 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
    778 	const pcitag_t tag = pdev->pd_pa.pa_tag;
    779 	pcireg_t addr;
    780 	int s;
    781 
    782 	s = splhigh();
    783 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
    784 	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
    785 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
    786 	splx(s);
    787 }
    788 
    789 bus_addr_t
    790 pci_resource_start(struct pci_dev *pdev, unsigned i)
    791 {
    792 
    793 	KASSERT(i < PCI_NUM_RESOURCES);
    794 	return pdev->pd_resources[i].addr;
    795 }
    796 
    797 bus_size_t
    798 pci_resource_len(struct pci_dev *pdev, unsigned i)
    799 {
    800 
    801 	KASSERT(i < PCI_NUM_RESOURCES);
    802 	return pdev->pd_resources[i].size;
    803 }
    804 
    805 bus_addr_t
    806 pci_resource_end(struct pci_dev *pdev, unsigned i)
    807 {
    808 
    809 	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
    810 }
    811 
    812 int
    813 pci_resource_flags(struct pci_dev *pdev, unsigned i)
    814 {
    815 
    816 	KASSERT(i < PCI_NUM_RESOURCES);
    817 	return pdev->pd_resources[i].flags;
    818 }
    819 
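         /*
          * Map BAR i into kernel virtual address space.  Only memory BARs are
          * supported; returns NULL if the BAR is not a memory BAR, is smaller
          * than the requested size, or cannot be mapped.
          */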
    820 void __pci_iomem *
    821 pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
    822 {
    823 	int error;
    824 
    825 	KASSERT(i < PCI_NUM_RESOURCES);
    826 	KASSERT(pdev->pd_resources[i].kva == NULL);
    827 
    828 	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
    829 		return NULL;
    830 	if (pdev->pd_resources[i].size < size)
    831 		return NULL;
    832 	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
    833 	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
    834 	    &pdev->pd_resources[i].bsh);
    835 	if (error)
    836 		return NULL;
    837 	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
    838 	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
    839 	    pdev->pd_resources[i].bsh);
    840 	pdev->pd_resources[i].mapped = true;
    841 
    842 	return pdev->pd_resources[i].kva;
    843 }
    844 
    845 void
    846 pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
    847 {
    848 	unsigned i;
    849 
    850 	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
    851 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
    852 		if (pdev->pd_resources[i].kva == kva)
    853 			break;
    854 	}
    855 	KASSERT(i < PCI_NUM_RESOURCES);
    856 
         	pdev->pd_resources[i].kva = NULL;
         	pdev->pd_resources[i].mapped = false;
         	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
         	    pdev->pd_resources[i].size);
    860 }
    861 
    862 void
    863 pci_save_state(struct pci_dev *pdev)
    864 {
    865 
    866 	KASSERT(pdev->pd_saved_state == NULL);
    867 	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
    868 	    KM_SLEEP);
    869 	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    870 	    pdev->pd_saved_state);
    871 }
    872 
    873 void
    874 pci_restore_state(struct pci_dev *pdev)
    875 {
    876 
    877 	KASSERT(pdev->pd_saved_state != NULL);
    878 	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
    879 	    pdev->pd_saved_state);
    880 	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
    881 	pdev->pd_saved_state = NULL;
    882 }
    883 
    884 bool
    885 pci_is_pcie(struct pci_dev *pdev)
    886 {
    887 
    888 	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
    889 }
    890 
    891 bool
    892 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
    893 {
    894 
    895 	/* XXX Cop-out.  */
    896 	if (mask > DMA_BIT_MASK(32))
    897 		return pci_dma64_available(&pdev->pd_pa);
    898 	else
    899 		return true;
    900 }
    901 
    902 bool
    903 pci_is_thunderbolt_attached(struct pci_dev *pdev)
    904 {
    905 
    906 	/* XXX Cop-out.  */
    907 	return false;
    908 }
    909 
    910 bool
    911 pci_is_root_bus(struct pci_bus *bus)
    912 {
    913 
    914 	return bus->number == 0;
    915 }
    916 
    917 int
    918 pci_domain_nr(struct pci_bus *bus)
    919 {
    920 
    921 	return pci_get_segment(bus->pb_pc);
    922 }
    923 
     924 /*
     925  * We explicitly rename pci_enable_device/pci_disable_device so that
     926  * every use of them has to be reviewed: NetBSD's PCI API does _not_
     927  * respect our local enablecnt here, and other parts of NetBSD (such
     928  * as PMF) enable and disable devices automatically, so each caller
     929  * has to decide whether to call these or not.
     930  */
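         /*
          * Illustrative sketch only, not code from this file: a caller that
          * does want the explicit enable/disable would pair the calls around
          * its use of the device, e.g.
          *
          *	if (linux_pci_enable_device(pdev))
          *		goto fail;
          *	...use the device...
          *	linux_pci_disable_device(pdev);
          *
          * Whether a particular caller should do that depends on what else
          * (e.g. PMF) already manages the command register, as noted above.
          */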
    931 
    932 int
    933 linux_pci_enable_device(struct pci_dev *pdev)
    934 {
    935 	const struct pci_attach_args *pa = &pdev->pd_pa;
    936 	pcireg_t csr;
    937 	int s;
    938 
    939 	if (pdev->pd_enablecnt++)
    940 		return 0;
    941 
    942 	s = splhigh();
    943 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    944 	/* If someone else (firmware) already enabled it, credit them.  */
    945 	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
    946 		pdev->pd_enablecnt++;
    947 	csr |= PCI_COMMAND_IO_ENABLE;
    948 	csr |= PCI_COMMAND_MEM_ENABLE;
    949 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
    950 	splx(s);
    951 
    952 	return 0;
    953 }
    954 
    955 void
    956 linux_pci_disable_device(struct pci_dev *pdev)
    957 {
    958 	const struct pci_attach_args *pa = &pdev->pd_pa;
    959 	pcireg_t csr;
    960 	int s;
    961 
    962 	if (--pdev->pd_enablecnt)
    963 		return;
    964 
    965 	s = splhigh();
    966 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    967 	csr &= ~PCI_COMMAND_IO_ENABLE;
    968 	csr &= ~PCI_COMMAND_MEM_ENABLE;
    969 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
    970 	splx(s);
    971 }
    972 
    973 void
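         /*
          * Undo linux_pci_dev_init(): free the fake parent bridge and the
          * pci_bus, unmap the expansion ROM if it is still mapped, and unmap
          * any BARs still mapped via pci_iomap().
          */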
    974 linux_pci_dev_destroy(struct pci_dev *pdev)
    975 {
    976 	unsigned i;
    977 
         	if (pdev->bus != NULL) {
         		if (pdev->bus->self != NULL)
         			kmem_free(pdev->bus->self, sizeof(*pdev->bus->self));
         		kmem_free(pdev->bus, sizeof(*pdev->bus));
         		pdev->bus = NULL;
         	}
    985 	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
    986 		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
     987 		pdev->pd_rom_vaddr = NULL;
    988 	}
    989 	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
    990 		if (!pdev->pd_resources[i].mapped)
    991 			continue;
    992 		bus_space_unmap(pdev->pd_resources[i].bst,
    993 		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
    994 	}
    995 
    996 	/* There is no way these should be still in use.  */
    997 	KASSERT(pdev->pd_saved_state == NULL);
    998 	KASSERT(pdev->pd_intr_handles == NULL);
    999 }
   1000 
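         /*
          * Return the fastest link speed the device supports: prefer the
          * Supported Link Speeds vector in LCAP2 (PCIe 3.x and later), and
          * fall back to the Max Link Speed field in LCAP.
          */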
   1001 enum pci_bus_speed
   1002 pcie_get_speed_cap(struct pci_dev *dev)
   1003 {
   1004 	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
   1005 	pcitag_t tag = dev->pd_pa.pa_tag;
   1006 	pcireg_t lcap, lcap2, xcap;
   1007 	int off;
   1008 
   1009 	/* Must have capabilities. */
   1010 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
   1011 		return PCI_SPEED_UNKNOWN;
   1012 
   1013 	/* Only PCIe 3.x has LCAP2. */
   1014 	xcap = pci_conf_read(pc, tag, off + PCIE_XCAP);
   1015 	if (__SHIFTOUT(xcap, PCIE_XCAP_VER_MASK) >= 2) {
   1016 		lcap2 = pci_conf_read(pc, tag, off + PCIE_LCAP2);
   1017 		if (lcap2) {
   1018 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS64) != 0) {
   1019 				return PCIE_SPEED_64_0GT;
   1020 			}
   1021 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS32) != 0) {
   1022 				return PCIE_SPEED_32_0GT;
   1023 			}
   1024 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS16) != 0) {
   1025 				return PCIE_SPEED_16_0GT;
   1026 			}
   1027 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS8) != 0) {
   1028 				return PCIE_SPEED_8_0GT;
   1029 			}
   1030 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS5) != 0) {
   1031 				return PCIE_SPEED_5_0GT;
   1032 			}
   1033 			if ((lcap2 & PCIE_LCAP2_SUP_LNKS2) != 0) {
   1034 				return PCIE_SPEED_2_5GT;
   1035 			}
   1036 		}
   1037 	}
   1038 
   1039 	lcap = pci_conf_read(pc, tag, off + PCIE_LCAP);
   1040 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_64) {
   1041 		return PCIE_SPEED_64_0GT;
   1042 	}
   1043 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_32) {
   1044 		return PCIE_SPEED_32_0GT;
   1045 	}
   1046 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_16) {
   1047 		return PCIE_SPEED_16_0GT;
   1048 	}
   1049 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_8) {
   1050 		return PCIE_SPEED_8_0GT;
   1051 	}
   1052 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_5) {
   1053 		return PCIE_SPEED_5_0GT;
   1054 	}
   1055 	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_2) {
   1056 		return PCIE_SPEED_2_5GT;
   1057 	}
   1058 
   1059 	return PCI_SPEED_UNKNOWN;
   1060 }
   1061 
    1062 /*
    1063  * This should walk the tree, but currently it only checks this device.
    1064  * It also does not write to limiting_dev (the only caller in drm2
    1065  * currently does not use it).
    1066  */
   1067 unsigned
   1068 pcie_bandwidth_available(struct pci_dev *dev,
   1069     struct pci_dev **limiting_dev,
   1070     enum pci_bus_speed *speed,
   1071     enum pcie_link_width *width)
   1072 {
   1073 	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
   1074 	pcitag_t tag = dev->pd_pa.pa_tag;
   1075 	pcireg_t lcsr;
   1076 	unsigned per_line_speed, num_lanes;
   1077 	int off;
   1078 
   1079 	/* Must have capabilities. */
   1080 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
   1081 		return 0;
   1082 
   1083 	if (speed)
   1084 		*speed = PCI_SPEED_UNKNOWN;
   1085 	if (width)
   1086 		*width = 0;
   1087 
   1088 	lcsr = pci_conf_read(pc, tag, off + PCIE_LCSR);
   1089 
   1090 	switch (lcsr & PCIE_LCSR_NLW) {
   1091 	case PCIE_LCSR_NLW_X1:
   1092 	case PCIE_LCSR_NLW_X2:
   1093 	case PCIE_LCSR_NLW_X4:
   1094 	case PCIE_LCSR_NLW_X8:
   1095 	case PCIE_LCSR_NLW_X12:
   1096 	case PCIE_LCSR_NLW_X16:
   1097 	case PCIE_LCSR_NLW_X32:
   1098 		num_lanes = __SHIFTOUT(lcsr, PCIE_LCSR_NLW);
   1099 		if (width)
   1100 			*width = num_lanes;
   1101 		break;
   1102 	default:
   1103 		num_lanes = 0;
   1104 		break;
   1105 	}
   1106 
         	switch (__SHIFTOUT(lcsr, PCIE_LCSR_LINKSPEED)) {
         	case PCIE_LCSR_LINKSPEED_2:
         		if (speed)
         			*speed = PCIE_SPEED_2_5GT;
         		per_line_speed = 2500 * 8 / 10;
         		break;
         	case PCIE_LCSR_LINKSPEED_5:
         		if (speed)
         			*speed = PCIE_SPEED_5_0GT;
         		per_line_speed = 5000 * 8 / 10;
         		break;
         	case PCIE_LCSR_LINKSPEED_8:
         		if (speed)
         			*speed = PCIE_SPEED_8_0GT;
         		per_line_speed = 8000 * 128 / 130;
         		break;
         	case PCIE_LCSR_LINKSPEED_16:
         		if (speed)
         			*speed = PCIE_SPEED_16_0GT;
         		per_line_speed = 16000 * 128 / 130;
         		break;
         	case PCIE_LCSR_LINKSPEED_32:
         		if (speed)
         			*speed = PCIE_SPEED_32_0GT;
         		per_line_speed = 32000 * 128 / 130;
         		break;
         	case PCIE_LCSR_LINKSPEED_64:
         		if (speed)
         			*speed = PCIE_SPEED_64_0GT;
         		per_line_speed = 64000 * 128 / 130;
         		break;
         	default:
         		per_line_speed = 0;
         	}
   1135 
   1136 	return num_lanes * per_line_speed;
   1137 }
   1138