/*	$NetBSD: nouveau_nvkm_engine_fifo_gpfifogk104.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_fifo_gpfifogk104.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $");

#include "changk104.h"
#include "cgrp.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/cla06f.h>
#include <nvif/unpack.h>

int
gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *chan)
{
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_client *client = chan->base.object.client;
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	int ret = 0;

	if (cgrp)
		nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
	else
		nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "%s %d [%s] kick timeout\n",
			   cgrp ? "tsg" : "channel",
			   cgrp ? cgrp->id : chan->base.chid, client->name);
		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
		ret = -ETIMEDOUT;
	}
	return ret;
}
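/*
 * Locking wrapper around gk104_fifo_gpfifo_kick_locked(): holds the
 * fifo subdev mutex for the duration of the preempt write/poll on
 * 0x002634 above, so concurrent kicks cannot interleave.
 */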
int
gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
{
	int ret;
	mutex_lock(&chan->base.fifo->engine.subdev.mutex);
	ret = gk104_fifo_gpfifo_kick_locked(chan);
	mutex_unlock(&chan->base.fifo->engine.subdev.mutex);
	return ret;
}

static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_SW    :
	case NVKM_ENGINE_CE0...NVKM_ENGINE_CE_LAST:
		return 0;
	case NVKM_ENGINE_GR    : return 0x0210;
	case NVKM_ENGINE_SEC   : return 0x0220;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	case NVKM_ENGINE_VIC   : return 0x0280;
	case NVKM_ENGINE_MSENC : return 0x0290;
	case NVKM_ENGINE_NVDEC0: return 0x02100270;
	case NVKM_ENGINE_NVENC0: return 0x02100290;
	case NVKM_ENGINE_NVENC1: return 0x0210;
	default:
		WARN_ON(1);
		return 0;
	}
}

static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
	int ret;

	ret = gk104_fifo_gpfifo_kick(chan);
	if (ret && suspend)
		return ret;

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, 0x00000000);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, 0x00000000);
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, 0x00000000);
			nvkm_wo32(inst, offset + 0x04, 0x00000000);
		}
		nvkm_done(inst);
	}

	return ret;
}

static int
gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);

	if (offset) {
		u64 addr = chan->engn[engine->subdev.index].vma->addr;
		u32 datalo = lower_32_bits(addr) | 0x00000004;
		u32 datahi = upper_32_bits(addr);
		nvkm_kmap(inst);
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, datalo);
			nvkm_wo32(inst, offset + 0x04, datahi);
		}
		nvkm_done(inst);
	}

	return 0;
}

void
gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}

int
gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	int engn = engine->subdev.index;
	int ret;

	if (!gk104_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
			   &chan->engn[engn].vma);
	if (ret)
		return ret;

	return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
			       chan->engn[engn].vma, NULL, 0);
}
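/*
 * Channel teardown: if the channel is still resident on a runlist,
 * remove it, kick it off the hardware, and commit the updated runlist
 * before clearing its channel control register at 0x800000 + chid * 8.
 */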
void
gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 coff = chan->base.chid * 8;

	if (!list_empty(&chan->head)) {
		gk104_fifo_runlist_remove(fifo, chan);
		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
		gk104_fifo_gpfifo_kick(chan);
		gk104_fifo_runlist_update(fifo, chan->runl);
	}

	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
}

void
gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 addr = chan->base.inst->addr >> 12;
	u32 coff = chan->base.chid * 8;

	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->runl << 16);
	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);

	if (list_empty(&chan->head) && !chan->killed) {
		gk104_fifo_runlist_insert(fifo, chan);
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->runl);
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
	}
}

void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	nvkm_memory_unref(&chan->mthd);
	kfree(chan->cgrp);
	return chan;
}

const struct nvkm_fifo_chan_func
gk104_fifo_gpfifo_func = {
	.dtor = gk104_fifo_gpfifo_dtor,
	.init = gk104_fifo_gpfifo_init,
	.fini = gk104_fifo_gpfifo_fini,
	.ntfy = gf100_fifo_chan_ntfy,
	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
	.engine_init = gk104_fifo_gpfifo_engine_init,
	.engine_fini = gk104_fifo_gpfifo_engine_fini,
};
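/*
 * Common constructor: picks the runlist, allocates and binds the
 * channel, optionally wraps it in a single-channel group when the
 * implementation requires one (fifo->func->cgrp_force), zeroes the
 * channel's 0x200-byte slice of fifo->user.mem, and fills in the RAMFC
 * portion of the instance block.
 */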
static int
gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
		       u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
		       const struct nvkm_oclass *oclass,
		       struct nvkm_object **pobject)
{
	struct gk104_fifo_chan *chan;
	int runlist = ffs(*runlists) - 1, ret, i;
	unsigned long engm;
	u64 subdevs = 0;
	u64 usermem;

	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
		return -EINVAL;
	*runlists = BIT_ULL(runlist);

	engm = fifo->runlist[runlist].engm;
	for_each_set_bit(i, &engm, fifo->engine_nr) {
		if (fifo->engine[i].engine)
			subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index);
	}

	if (subdevs & BIT_ULL(NVKM_ENGINE_GR))
		subdevs |= BIT_ULL(NVKM_ENGINE_SW);

	/* Allocate the channel. */
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	chan->runl = runlist;
	INIT_LIST_HEAD(&chan->head);

	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
				  0x1000, 0x1000, true, vmm, 0, subdevs,
				  1, fifo->user.bar->addr, 0x200,
				  oclass, &chan->base);
	if (ret)
		return ret;

	*chid = chan->base.chid;
	*inst = chan->base.inst->addr;

	/* Hack to support GPUs where even individual channels should be
	 * part of a channel group.
	 */
	if (fifo->func->cgrp_force) {
		if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
			return -ENOMEM;
		chan->cgrp->id = chan->base.chid;
		INIT_LIST_HEAD(&chan->cgrp->head);
		INIT_LIST_HEAD(&chan->cgrp->chan);
		chan->cgrp->chan_nr = 0;
	}

	/* Clear channel control registers. */
	usermem = chan->base.chid * 0x200;
	ilength = order_base_2(ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	/* RAMFC */
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
					 (ilength << 16));
	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
	nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(chan->base.inst);
	return 0;
}

int
gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %"PRIx64" "
				   "ioffset %016"PRIx64" ilength %08x "
				   "runlist %016"PRIx64" priv %d\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength, args->v0.runlist, args->v0.priv);
		if (args->v0.priv && !oclass->client->super)
			return -EINVAL;
		return gk104_fifo_gpfifo_new_(fifo,
					      &args->v0.runlist,
					      &args->v0.chid,
					       args->v0.vmm,
					       args->v0.ioffset,
					       args->v0.ilength,
					      &args->v0.inst,
					       args->v0.priv,
					      oclass, pobject);
	}

	return ret;
}
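/*
 * For reference, a sketch of how this constructor is typically reached
 * (assumed from the upstream Linux nouveau tree, not defined in this
 * file): the fifo implementation exposes KEPLER_CHANNEL_GPFIFO_A
 * through its channel class table, along the lines of
 *
 *	.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, gk104_fifo_gpfifo_new },
 *
 * in the gk104_fifo_func definition in gk104.c.
 */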