/*	$NetBSD: nouveau_nvkm_subdev_instmem_nv40.c,v 1.11 2021/12/19 12:31:19 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_instmem_nv40.c,v 1.11 2021/12/19 12:31:19 riastradh Exp $");

#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"

#include <core/ramht.h>
#include <engine/gr/nv40.h>

#ifdef __NetBSD__
#  define	__iomem	__nvkm_memory_iomem
#endif

struct nv40_instmem {
	struct nvkm_instmem base;
	struct nvkm_mm heap;
#ifdef __NetBSD__
	bus_space_tag_t iomemt;
	bus_space_handle_t iomemh;
	bus_size_t iomemsz;
#endif
	void __iomem *iomem;
};

/******************************************************************************
 * instmem object implementation
 *****************************************************************************/
#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)

struct nv40_instobj {
	struct nvkm_instobj base;
	struct nv40_instmem *imem;
	struct nvkm_mm_node *node;
};

static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
#ifdef __NetBSD__
	bus_space_write_stream_4(iobj->imem->iomemt, iobj->imem->iomemh,
	    iobj->node->offset + offset, data);
#else
	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
#endif
}

static u32
nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
#ifdef __NetBSD__
	return bus_space_read_stream_4(iobj->imem->iomemt, iobj->imem->iomemh,
	    iobj->node->offset + offset);
#else
	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
#endif
}

static const struct nvkm_memory_ptrs
nv40_instobj_ptrs = {
	.rd32 = nv40_instobj_rd32,
	.wr32 = nv40_instobj_wr32,
};

static void
nv40_instobj_release(struct nvkm_memory *memory)
{
	wmb();
}

static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	return iobj->imem->iomem + iobj->node->offset;
}
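
/*
 * NOTE: the rd32/wr32 hooks above cost one bus transaction per 32-bit
 * word, while acquire() hands back a direct pointer into the PRAMIN
 * mapping for bulk access.  Because that mapping is created once at
 * subdev construction and lives until the dtor, release() has nothing
 * to unmap; the wmb() is only there so that CPU writes through the
 * mapping are ordered before any subsequent GPU access to the object.
 */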
static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->length;
}

static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->offset;
}

static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_INST;
}

static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	mutex_lock(&iobj->imem->base.subdev.mutex);
	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
	mutex_unlock(&iobj->imem->base.subdev.mutex);
	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
	return iobj;
}

static const struct nvkm_memory_func
nv40_instobj_func = {
	.dtor = nv40_instobj_dtor,
	.target = nv40_instobj_target,
	.size = nv40_instobj_size,
	.addr = nv40_instobj_addr,
	.acquire = nv40_instobj_acquire,
	.release = nv40_instobj_release,
};

static int
nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	struct nv40_instobj *iobj;
	int ret;

	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
		return -ENOMEM;
	*pmemory = &iobj->base.memory;

	nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
	iobj->base.memory.ptrs = &nv40_instobj_ptrs;
	iobj->imem = imem;

	mutex_lock(&imem->base.subdev.mutex);
	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
			   align ? align : 1, &iobj->node);
	mutex_unlock(&imem->base.subdev.mutex);
	return ret;
}

/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

static u32
nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
{
#ifdef __NetBSD__
	struct nv40_instmem *imem = nv40_instmem(base);
	return bus_space_read_stream_4(imem->iomemt, imem->iomemh, addr);
#else
	return ioread32_native(nv40_instmem(base)->iomem + addr);
#endif
}

static void
nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
{
#ifdef __NetBSD__
	struct nv40_instmem *imem = nv40_instmem(base);
	bus_space_write_stream_4(imem->iomemt, imem->iomemh, addr, data);
#else
	iowrite32_native(data, nv40_instmem(base)->iomem + addr);
#endif
}

static int
nv40_instmem_oneinit(struct nvkm_instmem *base)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	struct nvkm_device *device = imem->base.subdev.device;
	int ret, vs;

	/* The PRAMIN aperture maps over the end of VRAM; reserve enough
	 * space to fit graphics contexts for every channel.  The magic
	 * numbers come from engine/gr/nv40.c.
	 */
	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
	else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
	else				  imem->base.reserved = 0x4a40 * vs;
	imem->base.reserved += 16 * 1024;
	imem->base.reserved *= 32;		/* per-channel */
	imem->base.reserved += 512 * 1024;	/* pci(e)gart table */
	imem->base.reserved += 512 * 1024;	/* object storage */
	imem->base.reserved = round_up(imem->base.reserved, 4096);
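	/*
	 * Worked example with hypothetical numbers: an NV40 (chipset
	 * 0x40) reporting two enabled units in 0x001540 gives vs = 2,
	 * so the sum above is
	 *
	 *	(0x6aa0 * 2 + 16 KiB) * 32 + 512 KiB + 512 KiB
	 *	    = 70976 * 32 + 1 MiB = 3319808 bytes,
	 *
	 * which round_up() pads to 3321856 bytes (~3.2 MiB); that is
	 * the size of the heap initialized below.
	 */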
	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
			      &imem->base.vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
			      &imem->base.ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
			      &imem->base.ramfc);
	if (ret)
		return ret;

	return 0;
}

static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	nvkm_memory_unref(&imem->base.ramfc);
	nvkm_memory_unref(&imem->base.ramro);
	nvkm_ramht_del(&imem->base.ramht);
	nvkm_memory_unref(&imem->base.vbios);
	nvkm_mm_fini(&imem->heap);
	if (imem->iomem)
#ifdef __NetBSD__
		bus_space_unmap(imem->iomemt, imem->iomemh, imem->iomemsz);
#else
		iounmap(imem->iomem);
#endif
	return imem;
}

static const struct nvkm_instmem_func
nv40_instmem = {
	.dtor = nv40_instmem_dtor,
	.oneinit = nv40_instmem_oneinit,
	.rd32 = nv40_instmem_rd32,
	.wr32 = nv40_instmem_wr32,
	.memory_new = nv40_instobj_new,
	.zero = false,
};

int
nv40_instmem_new(struct nvkm_device *device, int index,
		 struct nvkm_instmem **pimem)
{
	struct nv40_instmem *imem;
	int bar;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
	*pimem = &imem->base;

	/* map bar */
	if (device->func->resource_size(device, 2))
		bar = 2;
	else
		bar = 3;

#ifdef __NetBSD__
    {
	bus_addr_t iomembase;
	bus_size_t iomemsz;
	int ret;

	imem->iomemt = device->func->resource_tag(device, bar);
	iomembase = device->func->resource_addr(device, bar);
	iomemsz = device->func->resource_size(device, bar);
	/* XXX errno NetBSD->Linux */
	ret = -bus_space_map(imem->iomemt, iomembase, iomemsz,
	    BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_PREFETCHABLE, &imem->iomemh);
	if (ret) {
		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR %d"
		    ": %d\n", bar, ret);
		return ret;
	}
	imem->iomemsz = iomemsz;
	imem->iomem = bus_space_vaddr(imem->iomemt, imem->iomemh);
    }
#else
	imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
				 device->func->resource_size(device, bar));
	if (!imem->iomem) {
		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}
#endif

	return 0;
}
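
/*
 * Usage sketch (not part of this file): once the subdev is bound to
 * the device, instance objects are allocated through the generic
 * nvkm_memory interface, which dispatches NVKM_MEM_TARGET_INST
 * requests to nv40_instobj_new() above.  Assuming the nvkm_kmap()/
 * nvkm_wo32()/nvkm_done() accessor macros from <core/memory.h>:
 *
 *	struct nvkm_memory *mem;
 *	int ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 *				  0x1000, 0x100, true, &mem);
 *	if (ret == 0) {
 *		nvkm_kmap(mem);			   acquire() above
 *		nvkm_wo32(mem, 0x00, 0xcafe0001);  routed to wr32 above
 *		nvkm_done(mem);			   release() -> wmb()
 *		nvkm_memory_unref(&mem);	   frees the heap node
 *	}
 */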