/*	$NetBSD: nouveau_nvkm_engine_fifo_nv50.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_fifo_nv50.c,v 1.3 2021/12/18 23:45:35 riastradh Exp $");

#include "nv50.h"
#include "channv50.h"

#include <core/gpuobj.h>

static void
nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *cur;
	int i, p;

	cur = fifo->runlist[fifo->cur_runlist];
	fifo->cur_runlist = !fifo->cur_runlist;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < fifo->base.nr; i++) {
		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
			nvkm_wo32(cur, p++ * 4, i);
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x0032ec, p);
	nvkm_wr32(device, 0x002500, 0x00000101);
}

void
nv50_fifo_runlist_update(struct nv50_fifo *fifo)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	nv50_fifo_runlist_update_locked(fifo);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

int
nv50_fifo_oneinit(struct nvkm_fifo *base)
{
	struct nv50_fifo *fifo = nv50_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
			      false, &fifo->runlist[0]);
	if (ret)
		return ret;

	return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
			      false, &fifo->runlist[1]);
}

void
nv50_fifo_init(struct nvkm_fifo *base)
{
	struct nv50_fifo *fifo = nv50_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
	nvkm_wr32(device, 0x002044, 0x01003fff);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0xbfffffff);

	for (i = 0; i < 128; i++)
		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_runlist_update_locked(fifo);

	nvkm_wr32(device, 0x003200, 0x00000001);
	nvkm_wr32(device, 0x003250, 0x00000001);
	nvkm_wr32(device, 0x002500, 0x00000001);
}

void *
nv50_fifo_dtor(struct nvkm_fifo *base)
{
	struct nv50_fifo *fifo = nv50_fifo(base);
	nvkm_memory_unref(&fifo->runlist[1]);
	nvkm_memory_unref(&fifo->runlist[0]);
	return fifo;
}

int
nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       int index, struct nvkm_fifo **pfifo)
{
	struct nv50_fifo *fifo;
	int ret;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	*pfifo = &fifo->base;

	ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
	if (ret)
		return ret;

	set_bit(0, fifo->base.mask); /* PIO channel */
	set_bit(127, fifo->base.mask); /* inactive channel */
	return 0;
}

static const struct nvkm_fifo_func
nv50_fifo = {
	.dtor = nv50_fifo_dtor,
	.oneinit = nv50_fifo_oneinit,
	.init = nv50_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
		&nv50_fifo_dma_oclass,
		&nv50_fifo_gpfifo_oclass,
		NULL
	},
};

int
nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
}