/*	$NetBSD: nouveau_nvkm_subdev_pci_base.c,v 1.11 2021/12/19 12:43:45 riastradh Exp $	*/

/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_pci_base.c,v 1.11 2021/12/19 12:43:45 riastradh Exp $");

#include "priv.h"
#include "agp.h"

#include <core/option.h>
#include <core/pci.h>
#include <subdev/mc.h>

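/*
 * Register accessors: thin wrappers that dispatch to the
 * chipset-specific implementations in pci->func.
 */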
u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
	return pci->func->rd32(pci, addr);
}

void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
	pci->func->wr08(pci, addr, data);
}

void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
	pci->func->wr32(pci, addr, data);
}

u32
nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
{
	u32 data = pci->func->rd32(pci, addr);
	pci->func->wr32(pci, addr, (data & ~mask) | value);
	return data;
}

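/* Enable or disable ROM shadowing via bit 0 of register 0x0050. */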
void
nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
{
	u32 data = nvkm_pci_rd32(pci, 0x0050);
	if (shadow)
		data |=  0x00000001;
	else
		data &= ~0x00000001;
	nvkm_pci_wr32(pci, 0x0050, data);
}

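/*
 * Interrupt handler: disarm the master interrupt, re-arm MSI if it is
 * in use, let the MC subdev dispatch pending interrupts, then re-arm.
 */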
static irqreturn_t
nvkm_pci_intr(DRM_IRQ_ARGS)
{
	struct nvkm_pci *pci = arg;
	struct nvkm_device *device = pci->subdev.device;
	bool handled = false;

#ifndef __NetBSD__
	if (pci->irq < 0)
		return IRQ_HANDLED;
#endif

	nvkm_mc_intr_unarm(device);
	if (pci->msi)
		pci->func->msi_rearm(pci);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}

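/* Suspend/teardown hook: quiesce AGP if an AGP bridge is in use. */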
static int
nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);

	if (pci->agp.bridge)
		nvkm_agp_fini(pci);

	return 0;
}

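/* Early hook: give the AGP code a chance to prepare the bridge. */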
static int
nvkm_pci_preinit(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	if (pci->agp.bridge)
		nvkm_agp_preinit(pci);
	return 0;
}

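/*
 * One-time setup: set up PCIe state and hook up the interrupt handler
 * (MSI or INTx on NetBSD, request_irq() on Linux).
 */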
static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	struct pci_dev *pdev = pci->pdev;
	int ret;

	if (pci_is_pcie(pci->pdev)) {
		ret = nvkm_pcie_oneinit(pci);
		if (ret)
			return ret;
	}

#ifdef __NetBSD__
    {
	const char *const name = device_xname(pci_dev_dev(pdev));
	const struct pci_attach_args *pa = &pdev->pd_pa;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	if (pdev->msi_enabled) {
		if (pdev->pd_intr_handles == NULL) {
			if ((ret = pci_msi_alloc_exact(pa, &pci->pci_ihp,
			    1))) {
				aprint_error_dev(pci_dev_dev(pdev),
				    "couldn't allocate MSI (%s)\n", name);
				/* XXX errno NetBSD->Linux */
				return -ret;
			}
		} else {
			pci->pci_ihp = pdev->pd_intr_handles;
			pdev->pd_intr_handles = NULL;
		}
	} else {
		if ((ret = pci_intx_alloc(pa, &pci->pci_ihp))) {
			aprint_error_dev(pci_dev_dev(pdev),
			    "couldn't allocate INTx interrupt (%s)\n",
			    name);

			/* XXX errno NetBSD->Linux */
			return -ret;
		}
	}

	intrstr = pci_intr_string(pa->pa_pc, pci->pci_ihp[0],
	    intrbuf, sizeof(intrbuf));
	pci->pci_intrcookie = pci_intr_establish_xname(pa->pa_pc,
	    pci->pci_ihp[0], IPL_DRM, nvkm_pci_intr, pci,
	    name);
	if (pci->pci_intrcookie == NULL) {
		aprint_error_dev(pci_dev_dev(pdev),
		    "couldn't establish interrupt at %s (%s)\n", intrstr, name);
		pci_intr_release(pa->pa_pc, pci->pci_ihp, 1);
		pci->pci_ihp = NULL;
		return -EIO;	/* XXX er? */
	}

	aprint_normal_dev(pci_dev_dev(pdev), "interrupting at %s (%s)\n",
	    intrstr, name);
    }
#else
	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
	if (ret)
		return ret;

	pci->irq = pdev->irq;
#endif
	return 0;
}

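/*
 * Init: bring up the AGP or PCIe link, run the chipset init hook, and
 * re-arm MSI in case interrupts were already pending at load time.
 */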
static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	int ret;

	if (pci->agp.bridge) {
		ret = nvkm_agp_init(pci);
		if (ret)
			return ret;
	} else if (pci_is_pcie(pci->pdev)) {
		nvkm_pcie_init(pci);
	}

	if (pci->func->init)
		pci->func->init(pci);

	/* Ensure MSI interrupts are armed, for the case where there are
	 * already interrupts pending (for whatever reason) at load time.
	 */
	if (pci->msi)
		pci->func->msi_rearm(pci);

	return 0;
}

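/*
 * Destructor: tear down AGP, release the interrupt resources, and
 * disable MSI.
 */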
static void *
nvkm_pci_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);

	nvkm_agp_dtor(pci);

#ifdef __NetBSD__
	const struct pci_attach_args *pa = &pci->pdev->pd_pa;
	if (pci->pci_intrcookie != NULL) {
		pci_intr_disestablish(pa->pa_pc, pci->pci_intrcookie);
		pci->pci_intrcookie = NULL;
	}
	if (pci->pci_ihp != NULL) {
		pci_intr_release(pa->pa_pc, pci->pci_ihp, 1);
		pci->pci_ihp = NULL;
	}
#else
	if (pci->irq >= 0) {
		/* free_irq() will call the handler; we use pci->irq == -1
		 * to signal that it has been torn down and should be a no-op.
		 */
		int irq = pci->irq;
		pci->irq = -1;
		free_irq(irq, pci);
	}
#endif

	if (pci->msi)
		pci_disable_msi(pci->pdev);

	return nvkm_pci(subdev);
}

static const struct nvkm_subdev_func
nvkm_pci_func = {
	.dtor = nvkm_pci_dtor,
	.oneinit = nvkm_pci_oneinit,
	.preinit = nvkm_pci_preinit,
	.init = nvkm_pci_init,
	.fini = nvkm_pci_fini,
};

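/*
 * Common constructor: allocate and set up the PCI subdev, attach the
 * chipset-specific function table, and work out whether MSI should be
 * used (device/chipset quirks, big-endian builds, and the "NvMSI"
 * config option all factor in).
 */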
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pci **ppci)
{
	struct nvkm_pci *pci;

	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
	pci->func = func;
	pci->pdev = device->func->pci(device)->pdev;
#ifndef __NetBSD__
	pci->irq = -1;
#endif
	pci->pcie.speed = -1;
	pci->pcie.width = -1;

	if (device->type == NVKM_DEVICE_AGP)
		nvkm_agp_ctor(pci);

	switch (pci->pdev->device & 0x0ff0) {
	case 0x00f0:
	case 0x02e0:
		/* BR02? NFI how these would be handled yet exactly */
		break;
	default:
		switch (device->chipset) {
		case 0x84:	/* G84, no mode switch with MSI */
		case 0xaa:
			/* reported broken, nv also disables it */
			break;
		default:
			pci->msi = true;
			break;
		}
	}

#ifdef __BIG_ENDIAN
	pci->msi = false;
#endif

	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
	if (pci->msi && func->msi_rearm) {
		pci->msi = pci_enable_msi(pci->pdev) == 0;
		if (pci->msi)
			nvkm_debug(&pci->subdev, "MSI enabled\n");
	} else {
		pci->msi = false;
	}

	return 0;
}
    304