/* Home | History | Annotate | Line # | Download | only in device */
      1 /*	$NetBSD: nouveau_nvkm_engine_device_tegra.c,v 1.4 2024/04/16 14:34:02 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     22  * DEALINGS IN THE SOFTWARE.
     23  */
     24 #include <sys/cdefs.h>
     25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_device_tegra.c,v 1.4 2024/04/16 14:34:02 riastradh Exp $");
     26 
     27 #include <core/tegra.h>
     28 #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
     29 #include "priv.h"
     30 
     31 #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
     32 #include <asm/dma-iommu.h>
     33 #endif
     34 
     35 static int
     36 nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
     37 {
     38 	int ret;
     39 
     40 	if (tdev->vdd) {
     41 		ret = regulator_enable(tdev->vdd);
     42 		if (ret)
     43 			goto err_power;
     44 	}
     45 
     46 	ret = clk_prepare_enable(tdev->clk);
     47 	if (ret)
     48 		goto err_clk;
     49 	if (tdev->clk_ref) {
     50 		ret = clk_prepare_enable(tdev->clk_ref);
     51 		if (ret)
     52 			goto err_clk_ref;
     53 	}
     54 	ret = clk_prepare_enable(tdev->clk_pwr);
     55 	if (ret)
     56 		goto err_clk_pwr;
     57 	clk_set_rate(tdev->clk_pwr, 204000000);
     58 	udelay(10);
     59 
     60 	if (!tdev->pdev->dev.pm_domain) {
     61 		reset_control_assert(tdev->rst);
     62 		udelay(10);
     63 
     64 		ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
     65 		if (ret)
     66 			goto err_clamp;
     67 		udelay(10);
     68 
     69 		reset_control_deassert(tdev->rst);
     70 		udelay(10);
     71 	}
     72 
     73 	return 0;
     74 
     75 err_clamp:
     76 	clk_disable_unprepare(tdev->clk_pwr);
     77 err_clk_pwr:
     78 	if (tdev->clk_ref)
     79 		clk_disable_unprepare(tdev->clk_ref);
     80 err_clk_ref:
     81 	clk_disable_unprepare(tdev->clk);
     82 err_clk:
     83 	if (tdev->vdd)
     84 		regulator_disable(tdev->vdd);
     85 err_power:
     86 	return ret;
     87 }
     88 
     89 static int
     90 nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
     91 {
     92 	int ret;
     93 
     94 	clk_disable_unprepare(tdev->clk_pwr);
     95 	if (tdev->clk_ref)
     96 		clk_disable_unprepare(tdev->clk_ref);
     97 	clk_disable_unprepare(tdev->clk);
     98 	udelay(10);
     99 
    100 	if (tdev->vdd) {
    101 		ret = regulator_disable(tdev->vdd);
    102 		if (ret)
    103 			return ret;
    104 	}
    105 
    106 	return 0;
    107 }
    108 
/*
 * nvkm_device_tegra_probe_iommu - optionally place the GPU behind an IOMMU
 *
 * When an IOMMU is present on the platform bus and the chip supports it
 * (func->iommu_bit != 0), allocate and attach an IOMMU domain and set up
 * an nvkm_mm allocator over the GPU-addressable range.  Failure is not
 * fatal: tdev->iommu.domain is left NULL and the device falls back to
 * physical addressing.  Compiled out entirely without CONFIG_IOMMU_API.
 */
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	/*
	 * The ARM DMA layer may already have attached its own IOMMU
	 * mapping to this device; detach and release it so the domain
	 * can be managed here instead.
	 */
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	/* iommu_bit == 0 marks chips that cannot use an IOMMU. */
	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * A IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			/* Largest supported page smaller than PAGE_SIZE. */
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			/* fls() is 1-based; convert to a shift count. */
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		/*
		 * Allocator spans the whole (1 << iommu_bit)-byte GPU
		 * address space, counted in IOMMU pages.
		 */
		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

	/* Error unwinding: each label undoes one step above. */
detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	/* Leave the device usable without an IOMMU. */
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
    178 
    179 static void
    180 nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
    181 {
    182 #if IS_ENABLED(CONFIG_IOMMU_API)
    183 	if (tdev->iommu.domain) {
    184 		nvkm_mm_fini(&tdev->iommu.mm);
    185 		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
    186 		iommu_domain_free(tdev->iommu.domain);
    187 	}
    188 #endif
    189 }
    190 
    191 static struct nvkm_device_tegra *
    192 nvkm_device_tegra(struct nvkm_device *device)
    193 {
    194 	return container_of(device, struct nvkm_device_tegra, device);
    195 }
    196 
    197 static struct resource *
    198 nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
    199 {
    200 	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
    201 	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
    202 }
    203 
#ifdef __NetBSD__
static bus_space_tag_t
nvkm_device_tegra_resource_tag(struct nvkm_device *device, unsigned bar)
{
	/*
	 * Unimplemented NetBSD port hook: the body is deliberately not
	 * valid C so the build breaks loudly if this path is ever
	 * enabled before being written.  In practice the whole file is
	 * compiled out unless CONFIG_NOUVEAU_PLATFORM_DRIVER is set.
	 */
	XXX FIXME!
}
#endif
    211 
    212 static resource_size_t
    213 nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
    214 {
    215 	struct resource *res = nvkm_device_tegra_resource(device, bar);
    216 	return res ? res->start : 0;
    217 }
    218 
    219 static resource_size_t
    220 nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
    221 {
    222 	struct resource *res = nvkm_device_tegra_resource(device, bar);
    223 	return res ? resource_size(res) : 0;
    224 }
    225 
    226 static irqreturn_t
    227 nvkm_device_tegra_intr(int irq, void *arg)
    228 {
    229 	struct nvkm_device_tegra *tdev = arg;
    230 	struct nvkm_device *device = &tdev->device;
    231 	bool handled = false;
    232 	nvkm_mc_intr_unarm(device);
    233 	nvkm_mc_intr(device, &handled);
    234 	nvkm_mc_intr_rearm(device);
    235 	return handled ? IRQ_HANDLED : IRQ_NONE;
    236 }
    237 
    238 static void
    239 nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
    240 {
    241 	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
    242 	if (tdev->irq) {
    243 		free_irq(tdev->irq, tdev);
    244 		tdev->irq = 0;
    245 	}
    246 }
    247 
    248 static int
    249 nvkm_device_tegra_init(struct nvkm_device *device)
    250 {
    251 	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
    252 	int irq, ret;
    253 
    254 	irq = platform_get_irq_byname(tdev->pdev, "stall");
    255 	if (irq < 0)
    256 		return irq;
    257 
    258 	ret = request_irq(irq, nvkm_device_tegra_intr,
    259 			  IRQF_SHARED, "nvkm", tdev);
    260 	if (ret)
    261 		return ret;
    262 
    263 	tdev->irq = irq;
    264 	return 0;
    265 }
    266 
/*
 * nvkm_device_tegra_dtor - destroy hook for the common device code
 *
 * Powers the GPU down and drops any IOMMU state, then hands the
 * containing structure back to the caller, which frees it.
 */
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);

	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);

	return tdev;
}
    275 
/*
 * Dispatch table wiring the Tegra platform backend into the common
 * nvkm_device code.
 */
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
#ifdef __NetBSD__
	.resource_tag = nvkm_device_tegra_resource_tag,
#endif
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	/* CPU and GPU do not share a coherent view of memory here. */
	.cpu_coherent = false,
};
    289 
/*
 * nvkm_device_tegra_new - create an nvkm_device for a Tegra-attached GPU
 *
 * Looks up the vdd regulator, reset line and clocks on the platform
 * device, sets the DMA mask, optionally attaches an IOMMU, powers the
 * GPU up and constructs the common nvkm_device.  On success *pdevice
 * points at the embedded nvkm_device; on failure everything acquired
 * so far is torn down and a negative errno is returned.
 */
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	unsigned long rate;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	/* Only some boards expose a controllable vdd rail. */
	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	/*
	 * A zero rate means the GPU clock has not been configured yet:
	 * request the maximum the clock tree allows, then read back the
	 * rate actually granted.
	 */
	rate = clk_get_rate(tdev->clk);
	if (rate == 0) {
		ret = clk_set_rate(tdev->clk, ULONG_MAX);
		if (ret < 0)
			goto free;

		rate = clk_get_rate(tdev->clk);

		dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
	}

	/*
	 * When no ref clock is required, clk_ref stays NULL from
	 * kzalloc() and IS_ERR(NULL) is false, so the check passes.
	 */
	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id,
#ifdef __NetBSD__
			       /*acpidev*/NULL,
#endif
			       /*name*/NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

	/* Unwind in reverse order of acquisition. */
powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
    389 #else
/*
 * Stub used when the Tegra platform driver is configured out
 * (CONFIG_NOUVEAU_PLATFORM_DRIVER undefined): device creation always
 * fails with -ENOSYS.
 */
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
    399 #endif
    400