/*	$NetBSD: nouveau_nvkm_engine_fifo_dmanv04.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_fifo_dmanv04.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $");

#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>

#include <nvif/class.h>
#include <nvif/cl006b.h>
#include <nvif/unpack.h>

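/*
 * Unbind an object from the channel: drop the RAMHT entry that was
 * created for it at bind time.  "cookie" is the hash slot returned by
 * nv04_fifo_dma_object_ctor() below.
 */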
void
nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;

	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
	nvkm_ramht_remove(imem->ramht, cookie);
	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
}

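/*
 * Bind an object to the channel: build a context word encoding the
 * channel id and the target engine, then insert it together with the
 * object's handle into the device's RAMHT.  The returned hash slot is
 * what is later passed to nv04_fifo_dma_object_dtor() as its cookie.
 */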
static int
nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
	u32 context = 0x80000000 | chan->base.chid << 24;
	u32 handle  = object->handle;
	int hash;

	switch (object->engine->subdev.index) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
	case NVKM_ENGINE_GR    : context |= 0x00010000; break;
	case NVKM_ENGINE_MPEG  : context |= 0x00020000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
				 handle, context);
	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
	return hash;
}

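/*
 * Halt the channel.  With PFIFO context switches disabled, if this
 * channel is the one currently resident in CACHE1, save the per-channel
 * PFIFO registers back into its RAMFC slot (walking the chipset's ramfc
 * layout table), load a null context in its place, and finally take the
 * channel out of DMA mode.
 */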
void
nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	const struct nv04_fifo_ramfc *c;
	unsigned long flags;
	u32 mask = fifo->base.nr - 1;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = fifo->ramfc;
		nvkm_kmap(fctx);
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) &  rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);
		nvkm_done(fctx);

		c = fifo->ramfc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

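/*
 * Start the channel: setting its bit in NV04_PFIFO_MODE switches the
 * channel into DMA (pushbuffer) mode.
 */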
void
nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

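/*
 * Destroy the channel: zero its RAMFC slot (using the same ramfc layout
 * table as fini) and hand the allocation back to the caller to free.
 */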
void *
nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	const struct nv04_fifo_ramfc *c = fifo->ramfc;

	nvkm_kmap(imem->ramfc);
	do {
		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);
	nvkm_done(imem->ramfc);
	return chan;
}

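/*
 * Per-channel method table wiring the hooks above into the common
 * nvkm_fifo_chan code.
 */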
const struct nvkm_fifo_chan_func
nv04_fifo_dma_func = {
	.dtor = nv04_fifo_dma_dtor,
	.init = nv04_fifo_dma_init,
	.fini = nv04_fifo_dma_fini,
	.object_ctor = nv04_fifo_dma_object_ctor,
	.object_dtor = nv04_fifo_dma_object_dtor,
};

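/*
 * Channel constructor: validate the creation arguments (a pushbuffer
 * object is mandatory), allocate the channel, construct the common fifo
 * channel state, and fill in the channel's 32-byte RAMFC slot with the
 * requested DMA offset, the pushbuffer instance address and the DMA
 * fetch configuration.
 */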
static int
nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %"PRIx64" "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;

	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
				  (1ULL << NVKM_ENGINE_DMAOBJ) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_SW),
				  0, 0x800000, 0x10000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;
	chan->ramfc = chan->base.chid * 32;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(imem->ramfc);
	return 0;
}

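/*
 * DMA channel class descriptor; nv04_fifo_dma_new() above is invoked
 * when a client creates an NV03_CHANNEL_DMA object.
 */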
const struct nvkm_fifo_chan_oclass
nv04_fifo_dma_oclass = {
	.base.oclass = NV03_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv04_fifo_dma_new,
};