/*	$NetBSD: nouveau_nvkm_subdev_fb_gf100.c,v 1.4 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_fb_gf100.c,v 1.4 2021/12/18 23:45:39 riastradh Exp $");

#include "gf100.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/therm.h>

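/*
 * gf100_fb_intr: Service the FB-related bits of the master interrupt
 * status register (0x000100); the bit assignments (PFFB, PBFB) follow
 * the debug messages below.
 */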
void
gf100_fb_intr(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_subdev *subdev = &fb->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x000100);
	if (intr & 0x08000000)
		nvkm_debug(subdev, "PFFB intr\n");
	if (intr & 0x00002000)
		nvkm_debug(subdev, "PBFB intr\n");
}

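/*
 * gf100_fb_oneinit: One-time setup.  Allocate the MMU debug read/write
 * buffers, sized from the FB big-page size unless overridden with the
 * "MmuDebugBufferSize" config option, and a DMA-able scratch page whose
 * bus address (r100c10) is programmed into the hardware by gf100_fb_init.
 */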
int
gf100_fb_oneinit(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;
	int ret, size = 1 << (fb->base.page ? fb->base.page : 17);

	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
	size = max(size, 0x1000);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
			      true, &fb->base.mmu_rd);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
			      true, &fb->base.mmu_wr);
	if (ret)
		return ret;

#ifdef __NetBSD__
    {
	const bus_dma_tag_t dmat = device->func->dma_tag(device);
	int nsegs;
	int ret;

	fb->r100c10_page = NULL; /* paranoia */
	fb->r100c10_kva = NULL;

	/*
	 * Allocate, map, and load a single DMA-safe scratch page.  The
	 * failN labels chain backwards so that a failure at any step
	 * unwinds every step that preceded it.
	 */

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_alloc(dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &fb->r100c10_seg, 1, &nsegs, BUS_DMA_WAITOK);
	if (ret)
fail0:		return ret;
	KASSERT(nsegs == 1);

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_WAITOK, &fb->r100c10_page);
	if (ret) {
fail1:		bus_dmamem_free(dmat, &fb->r100c10_seg, 1);
		goto fail0;
	}

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_map(dmat, &fb->r100c10_seg, 1, PAGE_SIZE,
	    &fb->r100c10_kva, BUS_DMA_WAITOK);
	if (ret) {
fail2:		bus_dmamap_destroy(dmat, fb->r100c10_page);
		goto fail1;
	}
	(void)memset(fb->r100c10_kva, 0, PAGE_SIZE);

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load(dmat, fb->r100c10_page, fb->r100c10_kva,
	    PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	if (ret) {
fail3: __unused	bus_dmamem_unmap(dmat, fb->r100c10_kva, PAGE_SIZE);
		goto fail2;
	}

	fb->r100c10 = fb->r100c10_page->dm_segs[0].ds_addr;
    }
#else
	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (fb->r100c10_page) {
		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
					   PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(device->dev, fb->r100c10))
			return -EFAULT;
	}
#endif
	return 0;
}

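/*
 * gf100_fb_init_page: Program the FB large-page size: bit 0 of register
 * 0x100c80 is set for a 64KiB (shift 16) page and cleared for a 128KiB
 * (shift 17) page; any other size is rejected.
 */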
int
gf100_fb_init_page(struct nvkm_fb *fb)
{
	struct nvkm_device *device = fb->subdev.device;
	switch (fb->page) {
	case 16: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); break;
	case 17: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); break;
	default:
		return -EINVAL;
	}
	return 0;
}

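/*
 * gf100_fb_init: If the scratch page was allocated, write its bus address
 * (shifted right by 8 bits) into register 0x100c10, and initialize clock
 * gating when the chip provides a clkgate pack.
 */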
void
gf100_fb_init(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	if (fb->r100c10_page)
		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);

	if (base->func->clkgate_pack) {
		nvkm_therm_clkgate_init(device->therm,
					base->func->clkgate_pack);
	}
}

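/*
 * gf100_fb_dtor: Unmap and free the scratch page (bus_dma on NetBSD,
 * dma_unmap_page/__free_page on Linux) and hand the embedding structure
 * back to the caller for freeing.
 */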
void *
gf100_fb_dtor(struct nvkm_fb *base)
{
	struct gf100_fb *fb = gf100_fb(base);
	struct nvkm_device *device = fb->base.subdev.device;

	if (fb->r100c10_page) {
#ifdef __NetBSD__
		const bus_dma_tag_t dmat = device->func->dma_tag(device);

		bus_dmamap_unload(dmat, fb->r100c10_page);
		bus_dmamem_unmap(dmat, fb->r100c10_kva, PAGE_SIZE);
		bus_dmamap_destroy(dmat, fb->r100c10_page);
		bus_dmamem_free(dmat, &fb->r100c10_seg, 1);
		fb->r100c10_page = NULL;
#else
		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fb->r100c10_page);
#endif
	}

	return fb;
}

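/*
 * gf100_fb_new_: Common constructor: allocate the gf100_fb wrapper and
 * initialize the embedded nvkm_fb with the given function table.
 */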
int
gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
	      int index, struct nvkm_fb **pfb)
{
	struct gf100_fb *fb;

	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_fb_ctor(func, device, index, &fb->base);
	*pfb = &fb->base;

	return 0;
}

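/* Hook table for the GF100 FB implementation (128KiB big pages by default). */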
static const struct nvkm_fb_func
gf100_fb = {
	.dtor = gf100_fb_dtor,
	.oneinit = gf100_fb_oneinit,
	.init = gf100_fb_init,
	.init_page = gf100_fb_init_page,
	.intr = gf100_fb_intr,
	.ram_new = gf100_ram_new,
	.default_bigpage = 17,
};

int
gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
	return gf100_fb_new_(&gf100_fb, device, index, pfb);
}