/* $NetBSD: nouveau_nvkm_falcon_v1.c,v 1.2 2021/12/18 23:45:38 riastradh Exp $ */

/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_falcon_v1.c,v 1.2 2021/12/18 23:45:38 riastradh Exp $");

#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

/*
 * Upload @size bytes of code at @data into the falcon's instruction
 * memory (IMEM) at byte offset @start, using indirect-access port @port.
 * @tag is the initial tag value written for each 256-byte IMEM page;
 * @secure sets BIT(28) in the port control word (presumably marking the
 * transfer as secure-level code -- NOTE(review): confirm against falcon
 * hardware documentation).  BIT(24) is also set in the control word
 * (presumably enabling address auto-increment on the data port -- the
 * loop below relies on repeated writes to the same register advancing
 * through IMEM).  The uploaded code is zero-padded up to the next
 * 0x40-word (256-byte) boundary, as required by the hardware.
 *
 * Register map used (relative to the falcon base, 16 bytes per port):
 *   0x180 + port*16: IMEM control (offset | flags)
 *   0x184 + port*16: IMEM data
 *   0x188 + port*16: IMEM tag
 */
void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u16 tag, u8 port, bool secure)
{
	u8 rem = size % 4;	/* trailing bytes beyond the last full word */
	u32 reg;
	int i;

	size -= rem;

	reg = start | BIT(24) | (secure ? BIT(28) : 0);
	nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
	for (i = 0; i < size / 4; i++) {
		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
	}

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		/* keep only the valid low `rem` bytes of the final word */
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
				 extra & (BIT(rem * 8) - 1));
		++i;
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}

/*
 * Upload @size bytes at @data into the falcon's external memory (EMEM)
 * at byte offset @start via access port @port.  @start here is already
 * relative to the EMEM window (the caller, nvkm_falcon_v1_load_dmem,
 * subtracts func->emem_addr).  The 0x1 << 24 flag presumably enables
 * auto-increment on the data port, as with IMEM/DMEM above -- NOTE
 * (review): confirm.
 *
 * Register map (8 bytes per port):
 *   0xac0 + port*8: EMEM control
 *   0xac4 + port*8: EMEM data
 */
static void
nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	u8 rem = size % 4;	/* trailing bytes beyond the last full word */
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* keep only the valid low `rem` bytes of the final word */
		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}

/*
 * Upload @size bytes at @data into the falcon's data memory (DMEM) at
 * byte offset @start via access port @port.  If this falcon has an EMEM
 * region (func->emem_addr != 0) and @start falls inside it, the transfer
 * is redirected to the EMEM port instead, with @start rebased to the
 * EMEM window.
 *
 * Register map (8 bytes per port):
 *   0x1c0 + port*8: DMEM control
 *   0x1c4 + port*8: DMEM data
 */
void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	const struct nvkm_falcon_func *func = falcon->func;
	u8 rem = size % 4;	/* trailing bytes beyond the last full word */
	int i;

	/* addresses at or above emem_addr live in EMEM, not DMEM */
	if (func->emem_addr && start >= func->emem_addr)
		return nvkm_falcon_v1_load_emem(falcon, data,
						start - func->emem_addr, size,
						port);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* keep only the valid low `rem` bytes of the final word */
		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}

/*
 * Read @size bytes from the falcon's EMEM at byte offset @start (already
 * rebased to the EMEM window by the caller) into @data, via access port
 * @port.  0x1 << 25 in the control word presumably selects read mode
 * with auto-increment (write mode uses 0x1 << 24 above) -- NOTE(review):
 * confirm against falcon documentation.
 */
static void
nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	u8 rem = size % 4;	/* trailing bytes beyond the last full word */
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

		/* copy only the valid low `rem` bytes, little-endian order */
		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

/*
 * Read @size bytes from the falcon's DMEM at byte offset @start into
 * @data, via access port @port.  As with the load path, addresses inside
 * the EMEM window are redirected to nvkm_falcon_v1_read_emem.
 */
void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	const struct nvkm_falcon_func *func = falcon->func;
	u8 rem = size % 4;	/* trailing bytes beyond the last full word */
	int i;

	/* addresses at or above emem_addr live in EMEM, not DMEM */
	if (func->emem_addr && start >= func->emem_addr)
		return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
						size, port, data);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

		/* copy only the valid low `rem` bytes, little-endian order */
		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

/*
 * Bind (or, with @ctx == NULL, unbind) an instance block to the falcon.
 * Programs the FBIF aperture registers (one 32-bit slot per DMA index at
 * func->fbif) and points register 0x054 at the context's page-aligned
 * address, with the memory target encoded in bits 28-29 and a valid bit
 * at bit 30.  The exact meanings of the aperture values 0x4/0x5/0x6 and
 * of the 0x090/0x0a4 mask writes are hardware-defined -- NOTE(review):
 * see NVIDIA falcon FBIF documentation.
 */
void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
	const u32 fbif = falcon->func->fbif;
	u32 inst_loc;

	/* disable instance block binding */
	if (ctx == NULL) {
		nvkm_falcon_wr32(falcon, 0x10c, 0x0);
		return;
	}

	nvkm_falcon_wr32(falcon, 0x10c, 0x1);

	/* setup apertures - virtual */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
	/* setup apertures - physical */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

	/* Set context */
	switch (nvkm_memory_target(ctx)) {
	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
	case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	/* Enable context */
	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
	nvkm_falcon_wr32(falcon, 0x054,
			 ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
			 (inst_loc << 28) | (1 << 30));

	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
}

/*
 * Program the falcon's boot vector (register 0x104) with @start_addr,
 * the address execution begins at when the CPU is started.
 */
void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

/*
 * Kick the falcon CPU.  BIT(6) of register 0x100 selects between two
 * start mechanisms: when set, write 0x2 to 0x130, otherwise write 0x2
 * to 0x100 itself (presumably an alias/legacy start register split --
 * NOTE(review): confirm which generations set BIT(6)).
 */
void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
	u32 reg = nvkm_falcon_rd32(falcon, 0x100);

	if (reg & BIT(6))
		nvkm_falcon_wr32(falcon, 0x130, 0x2);
	else
		nvkm_falcon_wr32(falcon, 0x100, 0x2);
}

/*
 * Wait up to @ms milliseconds for bit 4 of register 0x100 (the halted
 * flag, as used by the hardware) to become set.  Returns 0 on success
 * or the negative errno from nvkm_wait_msec on timeout.
 */
int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Acknowledge the interrupt bits in @mask by setting them in register
 * 0x004 (interrupt clear), then wait up to 10ms for the corresponding
 * bits of 0x008 (interrupt status) to drop to zero.  Returns 0 on
 * success or a negative errno on timeout.
 */
int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	/* clear interrupt(s) */
	nvkm_falcon_mask(falcon, 0x004, mask, mask);
	/* wait until interrupts are cleared */
	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Wait up to 10ms for the falcon to go idle (low 16 bits of register
 * 0x04c reading zero).  Returns 0 on success, negative errno on timeout.
 */
static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Bring the falcon up: wait for the hardware memory scrubber to finish
 * (bits 1-2 of register 0x10c clearing), wait for idle, then unmask all
 * interrupt sources via 0x010.  Returns 0 on success or a negative
 * errno on timeout.
 */
int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
	if (ret < 0) {
		nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
		return ret;
	}

	ret = falcon_v1_wait_idle(falcon);
	if (ret)
		return ret;

	/* enable IRQs */
	nvkm_falcon_wr32(falcon, 0x010, 0xff);

	return 0;
}

/*
 * Shut the falcon down: mask all interrupt sources (register 0x014) and
 * wait (best-effort, timeout ignored) for any in-flight work to finish.
 */
void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
	/* disable IRQs and wait for any previous code to complete */
	nvkm_falcon_wr32(falcon, 0x014, 0xff);
	falcon_v1_wait_idle(falcon);
}

/* Operation table tying the generic falcon API to the v1 implementations. */
static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.start = nvkm_falcon_v1_start,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
};

/*
 * Allocate and construct a v1 falcon instance named @name at register
 * base @addr, owned by subdev @owner.  On success *@pfalcon holds the
 * new falcon (caller owns the allocation) and 0 is returned; returns
 * -ENOMEM on allocation failure.
 */
int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
		   struct nvkm_falcon **pfalcon)
{
	struct nvkm_falcon *falcon;
	if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
	return 0;
}