/*	$NetBSD: nouveau_nvkm_engine_dma_usernv04.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_dma_usernv04.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $");

#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
#include "user.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu/vmm.h>

#include <nvif/class.h>

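/*
 * NV04-family DMA object.  "clone" marks objects that alias the MMU's
 * page table (a VM target on an nv04 MMU); flags0/flags2 are the DMA
 * context words precomputed by nv04_dmaobj_new() and written out by
 * nv04_dmaobj_bind().
 */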
struct nv04_dmaobj {
	struct nvkm_dmaobj base;
	bool clone;
	u32 flags0;
	u32 flags2;
};

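/*
 * Bind the DMA object into instance memory: split the start address
 * into a page-aligned frame and a sub-page adjust, then emit the
 * four-word NV04 DMA context describing the range.
 */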
static int
nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
		 int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
	u64 offset = dmaobj->base.start & 0xfffff000;
	u64 adjust = dmaobj->base.start & 0x00000fff;
	u32 length = dmaobj->base.limit - dmaobj->base.start;
	int ret;

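	/*
	 * A cloned VM object refers to the nv04 MMU's single page table:
	 * start == 0 simply wraps the page table itself, otherwise the
	 * physical page backing "start" is looked up from its PTE.
	 */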
	if (dmaobj->clone) {
		struct nvkm_memory *pgt =
			device->mmu->vmm->pd->pt[0]->memory;
		if (!dmaobj->base.start)
			return nvkm_gpuobj_wrap(pgt, pgpuobj);
		nvkm_kmap(pgt);
		offset  = nvkm_ro32(pgt, 8 + (offset >> 10));
		offset &= 0xfffff000;
		nvkm_done(pgt);
	}

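	/*
	 * 16-byte object layout: flags0 plus the adjust, the length word,
	 * and the frame address (with flags2) written at both 0x08 and 0x0c.
	 */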
	ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
		nvkm_wo32(*pgpuobj, 0x04, length);
		nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
		nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
		nvkm_done(*pgpuobj);
	}

	return ret;
}

static const struct nvkm_dmaobj_func
nv04_dmaobj_func = {
	.bind = nv04_dmaobj_bind,
};

int
nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
	struct nvkm_device *device = dma->engine.subdev.device;
	struct nv04_dmaobj *dmaobj;
	int ret;

	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
		return -ENOMEM;
	*pdmaobj = &dmaobj->base;

	ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
			       &data, &size, &dmaobj->base);
	if (ret)
		return ret;

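	/*
	 * VM targets are not passed through directly: rewrite them to
	 * PCI with read/write access and, on an nv04 MMU, have bind()
	 * clone the page table instead.
	 */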
	if (dmaobj->base.target == NV_MEM_TARGET_VM) {
		if (device->mmu->func == &nv04_mmu)
			dmaobj->clone = true;
		dmaobj->base.target = NV_MEM_TARGET_PCI;
		dmaobj->base.access = NV_MEM_ACCESS_RW;
	}

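	/* flags0 carries the object class plus the memory-target encoding. */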
	dmaobj->flags0 = oclass->base.oclass;
	switch (dmaobj->base.target) {
	case NV_MEM_TARGET_VRAM:
		dmaobj->flags0 |= 0x00003000;
		break;
	case NV_MEM_TARGET_PCI:
		dmaobj->flags0 |= 0x00023000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		dmaobj->flags0 |= 0x00033000;
		break;
	default:
		return -EINVAL;
	}

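	/*
	 * Access encoding: read-only and write-only are flagged in flags0;
	 * write-only falls through so that both WO and RW also set the
	 * flags2 bit.
	 */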
	switch (dmaobj->base.access) {
	case NV_MEM_ACCESS_RO:
		dmaobj->flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		dmaobj->flags0 |= 0x00008000;
		/* fall through */
	case NV_MEM_ACCESS_RW:
		dmaobj->flags2 |= 0x00000002;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}