1 /* $NetBSD: nouveau_nvkm_engine_gr_gf100.c,v 1.7 2021/12/19 11:34:45 riastradh Exp $ */ 2 3 /* 4 * Copyright 2012 Red Hat Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Ben Skeggs 25 */ 26 #include <sys/cdefs.h> 27 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_gr_gf100.c,v 1.7 2021/12/19 11:34:45 riastradh Exp $"); 28 29 #include "gf100.h" 30 #include "ctxgf100.h" 31 #include "fuc/os.h" 32 33 #include <core/client.h> 34 #include <core/firmware.h> 35 #include <core/option.h> 36 #include <subdev/acr.h> 37 #include <subdev/fb.h> 38 #include <subdev/mc.h> 39 #include <subdev/pmu.h> 40 #include <subdev/therm.h> 41 #include <subdev/timer.h> 42 #include <engine/fifo.h> 43 44 #include <nvif/class.h> 45 #include <nvif/cl9097.h> 46 #include <nvif/if900d.h> 47 #include <nvif/unpack.h> 48 49 #include <linux/nbsd-namespace.h> 50 51 /******************************************************************************* 52 * Zero Bandwidth Clear 53 ******************************************************************************/ 54 55 static void 56 gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc) 57 { 58 struct nvkm_device *device = gr->base.engine.subdev.device; 59 if (gr->zbc_color[zbc].format) { 60 nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]); 61 nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]); 62 nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]); 63 nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]); 64 } 65 nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format); 66 nvkm_wr32(device, 0x405820, zbc); 67 nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */ 68 } 69 70 static int 71 gf100_gr_zbc_color_get(struct gf100_gr *gr, int format, 72 const u32 ds[4], const u32 l2[4]) 73 { 74 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; 75 int zbc = -ENOSPC, i; 76 77 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 78 if (gr->zbc_color[i].format) { 79 if (gr->zbc_color[i].format != format) 80 continue; 81 if (memcmp(gr->zbc_color[i].ds, ds, sizeof( 82 gr->zbc_color[i].ds))) 83 continue; 84 if (memcmp(gr->zbc_color[i].l2, l2, sizeof( 85 gr->zbc_color[i].l2))) { 86 WARN_ON(1); 87 return -EINVAL; 88 } 89 return i; 90 } else { 91 zbc = (zbc < 0) ? i : zbc; 92 } 93 } 94 95 if (zbc < 0) 96 return zbc; 97 98 memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds)); 99 memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2)); 100 gr->zbc_color[zbc].format = format; 101 nvkm_ltc_zbc_color_get(ltc, zbc, l2); 102 gr->func->zbc->clear_color(gr, zbc); 103 return zbc; 104 } 105 106 static void 107 gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc) 108 { 109 struct nvkm_device *device = gr->base.engine.subdev.device; 110 if (gr->zbc_depth[zbc].format) 111 nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds); 112 nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format); 113 nvkm_wr32(device, 0x405820, zbc); 114 nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */ 115 } 116 117 static int 118 gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, 119 const u32 ds, const u32 l2) 120 { 121 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; 122 int zbc = -ENOSPC, i; 123 124 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { 125 if (gr->zbc_depth[i].format) { 126 if (gr->zbc_depth[i].format != format) 127 continue; 128 if (gr->zbc_depth[i].ds != ds) 129 continue; 130 if (gr->zbc_depth[i].l2 != l2) { 131 WARN_ON(1); 132 return -EINVAL; 133 } 134 return i; 135 } else { 136 zbc = (zbc < 0) ? i : zbc; 137 } 138 } 139 140 if (zbc < 0) 141 return zbc; 142 143 gr->zbc_depth[zbc].format = format; 144 gr->zbc_depth[zbc].ds = ds; 145 gr->zbc_depth[zbc].l2 = l2; 146 nvkm_ltc_zbc_depth_get(ltc, zbc, l2); 147 gr->func->zbc->clear_depth(gr, zbc); 148 return zbc; 149 } 150 151 const struct gf100_gr_func_zbc 152 gf100_gr_zbc = { 153 .clear_color = gf100_gr_zbc_clear_color, 154 .clear_depth = gf100_gr_zbc_clear_depth, 155 }; 156 157 /******************************************************************************* 158 * Graphics object classes 159 ******************************************************************************/ 160 #define gf100_gr_object(p) container_of((p), struct gf100_gr_object, object) 161 162 struct gf100_gr_object { 163 struct nvkm_object object; 164 struct gf100_gr_chan *chan; 165 }; 166 167 static int 168 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) 169 { 170 struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); 171 union { 172 struct fermi_a_zbc_color_v0 v0; 173 } *args = data; 174 int ret = -ENOSYS; 175 176 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 177 switch (args->v0.format) { 178 case FERMI_A_ZBC_COLOR_V0_FMT_ZERO: 179 case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE: 180 case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32: 181 case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16: 182 case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16: 183 case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16: 184 case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16: 185 case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16: 186 case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8: 187 case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8: 188 case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10: 189 case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10: 190 case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8: 191 case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8: 192 case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8: 193 case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8: 194 case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8: 195 case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10: 196 case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11: 197 ret = gf100_gr_zbc_color_get(gr, args->v0.format, 198 args->v0.ds, 199 args->v0.l2); 200 if (ret >= 0) { 201 args->v0.index = ret; 202 return 0; 203 } 204 break; 205 default: 206 return -EINVAL; 207 } 208 } 209 210 return ret; 211 } 212 213 static int 214 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) 215 { 216 struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); 217 union { 218 struct fermi_a_zbc_depth_v0 v0; 219 } *args = data; 220 int ret = -ENOSYS; 221 222 if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { 223 switch (args->v0.format) { 224 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32: 225 ret = gf100_gr_zbc_depth_get(gr, args->v0.format, 226 args->v0.ds, 227 args->v0.l2); 228 return (ret >= 0) ? 0 : -ENOSPC; 229 default: 230 return -EINVAL; 231 } 232 } 233 234 return ret; 235 } 236 237 static int 238 gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) 239 { 240 nvif_ioctl(object, "fermi mthd %08x\n", mthd); 241 switch (mthd) { 242 case FERMI_A_ZBC_COLOR: 243 return gf100_fermi_mthd_zbc_color(object, data, size); 244 case FERMI_A_ZBC_DEPTH: 245 return gf100_fermi_mthd_zbc_depth(object, data, size); 246 default: 247 break; 248 } 249 return -EINVAL; 250 } 251 252 const struct nvkm_object_func 253 gf100_fermi = { 254 .mthd = gf100_fermi_mthd, 255 }; 256 257 static void 258 gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data) 259 { 260 nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000); 261 nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000); 262 } 263 264 static bool 265 gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data) 266 { 267 switch (class & 0x00ff) { 268 case 0x97: 269 case 0xc0: 270 switch (mthd) { 271 case 0x1528: 272 gf100_gr_mthd_set_shader_exceptions(device, data); 273 return true; 274 default: 275 break; 276 } 277 break; 278 default: 279 break; 280 } 281 return false; 282 } 283 284 static const struct nvkm_object_func 285 gf100_gr_object_func = { 286 }; 287 288 static int 289 gf100_gr_object_new(const struct nvkm_oclass *oclass, void *data, u32 size, 290 struct nvkm_object **pobject) 291 { 292 struct gf100_gr_chan *chan = gf100_gr_chan(oclass->parent); 293 struct gf100_gr_object *object; 294 295 if (!(object = kzalloc(sizeof(*object), GFP_KERNEL))) 296 return -ENOMEM; 297 *pobject = &object->object; 298 299 nvkm_object_ctor(oclass->base.func ? oclass->base.func : 300 &gf100_gr_object_func, oclass, &object->object); 301 object->chan = chan; 302 return 0; 303 } 304 305 static int 306 gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass) 307 { 308 struct gf100_gr *gr = gf100_gr(base); 309 int c = 0; 310 311 while (gr->func->sclass[c].oclass) { 312 if (c++ == index) { 313 *sclass = gr->func->sclass[index]; 314 sclass->ctor = gf100_gr_object_new; 315 return index; 316 } 317 } 318 319 return c; 320 } 321 322 /******************************************************************************* 323 * PGRAPH context 324 ******************************************************************************/ 325 326 static int 327 gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, 328 int align, struct nvkm_gpuobj **pgpuobj) 329 { 330 struct gf100_gr_chan *chan = gf100_gr_chan(object); 331 struct gf100_gr *gr = chan->gr; 332 int ret, i; 333 334 ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size, 335 align, false, parent, pgpuobj); 336 if (ret) 337 return ret; 338 339 nvkm_kmap(*pgpuobj); 340 for (i = 0; i < gr->size; i += 4) 341 nvkm_wo32(*pgpuobj, i, gr->data[i / 4]); 342 343 if (!gr->firmware) { 344 nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2); 345 nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma->addr >> 8); 346 } else { 347 nvkm_wo32(*pgpuobj, 0xf4, 0); 348 nvkm_wo32(*pgpuobj, 0xf8, 0); 349 nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2); 350 nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma->addr)); 351 nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma->addr)); 352 nvkm_wo32(*pgpuobj, 0x1c, 1); 353 nvkm_wo32(*pgpuobj, 0x20, 0); 354 nvkm_wo32(*pgpuobj, 0x28, 0); 355 nvkm_wo32(*pgpuobj, 0x2c, 0); 356 } 357 nvkm_done(*pgpuobj); 358 return 0; 359 } 360 361 static void * 362 gf100_gr_chan_dtor(struct nvkm_object *object) 363 { 364 struct gf100_gr_chan *chan = gf100_gr_chan(object); 365 int i; 366 367 for (i = 0; i < ARRAY_SIZE(chan->data); i++) { 368 nvkm_vmm_put(chan->vmm, &chan->data[i].vma); 369 nvkm_memory_unref(&chan->data[i].mem); 370 } 371 372 nvkm_vmm_put(chan->vmm, &chan->mmio_vma); 373 nvkm_memory_unref(&chan->mmio); 374 nvkm_vmm_unref(&chan->vmm); 375 return chan; 376 } 377 378 static const struct nvkm_object_func 379 gf100_gr_chan = { 380 .dtor = gf100_gr_chan_dtor, 381 .bind = gf100_gr_chan_bind, 382 }; 383 384 static int 385 gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, 386 const struct nvkm_oclass *oclass, 387 struct nvkm_object **pobject) 388 { 389 struct gf100_gr *gr = gf100_gr(base); 390 struct gf100_gr_data *data = gr->mmio_data; 391 struct gf100_gr_mmio *mmio = gr->mmio_list; 392 struct gf100_gr_chan *chan; 393 struct gf100_vmm_map_v0 args = { .priv = 1 }; 394 struct nvkm_device *device = gr->base.engine.subdev.device; 395 int ret, i; 396 397 if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) 398 return -ENOMEM; 399 nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object); 400 chan->gr = gr; 401 chan->vmm = nvkm_vmm_ref(fifoch->vmm); 402 *pobject = &chan->object; 403 404 /* allocate memory for a "mmio list" buffer that's used by the HUB 405 * fuc to modify some per-context register settings on first load 406 * of the context. 407 */ 408 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100, 409 false, &chan->mmio); 410 if (ret) 411 return ret; 412 413 ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma); 414 if (ret) 415 return ret; 416 417 ret = nvkm_memory_map(chan->mmio, 0, fifoch->vmm, 418 chan->mmio_vma, &args, sizeof(args)); 419 if (ret) 420 return ret; 421 422 /* allocate buffers referenced by mmio list */ 423 for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) { 424 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 425 data->size, data->align, false, 426 &chan->data[i].mem); 427 if (ret) 428 return ret; 429 430 ret = nvkm_vmm_get(fifoch->vmm, 12, 431 nvkm_memory_size(chan->data[i].mem), 432 &chan->data[i].vma); 433 if (ret) 434 return ret; 435 436 args.priv = data->priv; 437 438 ret = nvkm_memory_map(chan->data[i].mem, 0, chan->vmm, 439 chan->data[i].vma, &args, sizeof(args)); 440 if (ret) 441 return ret; 442 443 data++; 444 } 445 446 /* finally, fill in the mmio list and point the context at it */ 447 nvkm_kmap(chan->mmio); 448 for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) { 449 u32 addr = mmio->addr; 450 u32 data = mmio->data; 451 452 if (mmio->buffer >= 0) { 453 u64 info = chan->data[mmio->buffer].vma->addr; 454 data |= info >> mmio->shift; 455 } 456 457 nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr); 458 nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data); 459 mmio++; 460 } 461 nvkm_done(chan->mmio); 462 return 0; 463 } 464 465 /******************************************************************************* 466 * PGRAPH register lists 467 ******************************************************************************/ 468 469 const struct gf100_gr_init 470 gf100_gr_init_main_0[] = { 471 { 0x400080, 1, 0x04, 0x003083c2 }, 472 { 0x400088, 1, 0x04, 0x00006fe7 }, 473 { 0x40008c, 1, 0x04, 0x00000000 }, 474 { 0x400090, 1, 0x04, 0x00000030 }, 475 { 0x40013c, 1, 0x04, 0x013901f7 }, 476 { 0x400140, 1, 0x04, 0x00000100 }, 477 { 0x400144, 1, 0x04, 0x00000000 }, 478 { 0x400148, 1, 0x04, 0x00000110 }, 479 { 0x400138, 1, 0x04, 0x00000000 }, 480 { 0x400130, 2, 0x04, 0x00000000 }, 481 { 0x400124, 1, 0x04, 0x00000002 }, 482 {} 483 }; 484 485 const struct gf100_gr_init 486 gf100_gr_init_fe_0[] = { 487 { 0x40415c, 1, 0x04, 0x00000000 }, 488 { 0x404170, 1, 0x04, 0x00000000 }, 489 {} 490 }; 491 492 const struct gf100_gr_init 493 gf100_gr_init_pri_0[] = { 494 { 0x404488, 2, 0x04, 0x00000000 }, 495 {} 496 }; 497 498 const struct gf100_gr_init 499 gf100_gr_init_rstr2d_0[] = { 500 { 0x407808, 1, 0x04, 0x00000000 }, 501 {} 502 }; 503 504 const struct gf100_gr_init 505 gf100_gr_init_pd_0[] = { 506 { 0x406024, 1, 0x04, 0x00000000 }, 507 {} 508 }; 509 510 const struct gf100_gr_init 511 gf100_gr_init_ds_0[] = { 512 { 0x405844, 1, 0x04, 0x00ffffff }, 513 { 0x405850, 1, 0x04, 0x00000000 }, 514 { 0x405908, 1, 0x04, 0x00000000 }, 515 {} 516 }; 517 518 const struct gf100_gr_init 519 gf100_gr_init_scc_0[] = { 520 { 0x40803c, 1, 0x04, 0x00000000 }, 521 {} 522 }; 523 524 const struct gf100_gr_init 525 gf100_gr_init_prop_0[] = { 526 { 0x4184a0, 1, 0x04, 0x00000000 }, 527 {} 528 }; 529 530 const struct gf100_gr_init 531 gf100_gr_init_gpc_unk_0[] = { 532 { 0x418604, 1, 0x04, 0x00000000 }, 533 { 0x418680, 1, 0x04, 0x00000000 }, 534 { 0x418714, 1, 0x04, 0x80000000 }, 535 { 0x418384, 1, 0x04, 0x00000000 }, 536 {} 537 }; 538 539 const struct gf100_gr_init 540 gf100_gr_init_setup_0[] = { 541 { 0x418814, 3, 0x04, 0x00000000 }, 542 {} 543 }; 544 545 const struct gf100_gr_init 546 gf100_gr_init_crstr_0[] = { 547 { 0x418b04, 1, 0x04, 0x00000000 }, 548 {} 549 }; 550 551 const struct gf100_gr_init 552 gf100_gr_init_setup_1[] = { 553 { 0x4188c8, 1, 0x04, 0x80000000 }, 554 { 0x4188cc, 1, 0x04, 0x00000000 }, 555 { 0x4188d0, 1, 0x04, 0x00010000 }, 556 { 0x4188d4, 1, 0x04, 0x00000001 }, 557 {} 558 }; 559 560 const struct gf100_gr_init 561 gf100_gr_init_zcull_0[] = { 562 { 0x418910, 1, 0x04, 0x00010001 }, 563 { 0x418914, 1, 0x04, 0x00000301 }, 564 { 0x418918, 1, 0x04, 0x00800000 }, 565 { 0x418980, 1, 0x04, 0x77777770 }, 566 { 0x418984, 3, 0x04, 0x77777777 }, 567 {} 568 }; 569 570 const struct gf100_gr_init 571 gf100_gr_init_gpm_0[] = { 572 { 0x418c04, 1, 0x04, 0x00000000 }, 573 { 0x418c88, 1, 0x04, 0x00000000 }, 574 {} 575 }; 576 577 const struct gf100_gr_init 578 gf100_gr_init_gpc_unk_1[] = { 579 { 0x418d00, 1, 0x04, 0x00000000 }, 580 { 0x418f08, 1, 0x04, 0x00000000 }, 581 { 0x418e00, 1, 0x04, 0x00000050 }, 582 { 0x418e08, 1, 0x04, 0x00000000 }, 583 {} 584 }; 585 586 const struct gf100_gr_init 587 gf100_gr_init_gcc_0[] = { 588 { 0x41900c, 1, 0x04, 0x00000000 }, 589 { 0x419018, 1, 0x04, 0x00000000 }, 590 {} 591 }; 592 593 const struct gf100_gr_init 594 gf100_gr_init_tpccs_0[] = { 595 { 0x419d08, 2, 0x04, 0x00000000 }, 596 { 0x419d10, 1, 0x04, 0x00000014 }, 597 {} 598 }; 599 600 const struct gf100_gr_init 601 gf100_gr_init_tex_0[] = { 602 { 0x419ab0, 1, 0x04, 0x00000000 }, 603 { 0x419ab8, 1, 0x04, 0x000000e7 }, 604 { 0x419abc, 2, 0x04, 0x00000000 }, 605 {} 606 }; 607 608 const struct gf100_gr_init 609 gf100_gr_init_pe_0[] = { 610 { 0x41980c, 3, 0x04, 0x00000000 }, 611 { 0x419844, 1, 0x04, 0x00000000 }, 612 { 0x41984c, 1, 0x04, 0x00005bc5 }, 613 { 0x419850, 4, 0x04, 0x00000000 }, 614 {} 615 }; 616 617 const struct gf100_gr_init 618 gf100_gr_init_l1c_0[] = { 619 { 0x419c98, 1, 0x04, 0x00000000 }, 620 { 0x419ca8, 1, 0x04, 0x80000000 }, 621 { 0x419cb4, 1, 0x04, 0x00000000 }, 622 { 0x419cb8, 1, 0x04, 0x00008bf4 }, 623 { 0x419cbc, 1, 0x04, 0x28137606 }, 624 { 0x419cc0, 2, 0x04, 0x00000000 }, 625 {} 626 }; 627 628 const struct gf100_gr_init 629 gf100_gr_init_wwdx_0[] = { 630 { 0x419bd4, 1, 0x04, 0x00800000 }, 631 { 0x419bdc, 1, 0x04, 0x00000000 }, 632 {} 633 }; 634 635 const struct gf100_gr_init 636 gf100_gr_init_tpccs_1[] = { 637 { 0x419d2c, 1, 0x04, 0x00000000 }, 638 {} 639 }; 640 641 const struct gf100_gr_init 642 gf100_gr_init_mpc_0[] = { 643 { 0x419c0c, 1, 0x04, 0x00000000 }, 644 {} 645 }; 646 647 static const struct gf100_gr_init 648 gf100_gr_init_sm_0[] = { 649 { 0x419e00, 1, 0x04, 0x00000000 }, 650 { 0x419ea0, 1, 0x04, 0x00000000 }, 651 { 0x419ea4, 1, 0x04, 0x00000100 }, 652 { 0x419ea8, 1, 0x04, 0x00001100 }, 653 { 0x419eac, 1, 0x04, 0x11100702 }, 654 { 0x419eb0, 1, 0x04, 0x00000003 }, 655 { 0x419eb4, 4, 0x04, 0x00000000 }, 656 { 0x419ec8, 1, 0x04, 0x06060618 }, 657 { 0x419ed0, 1, 0x04, 0x0eff0e38 }, 658 { 0x419ed4, 1, 0x04, 0x011104f1 }, 659 { 0x419edc, 1, 0x04, 0x00000000 }, 660 { 0x419f00, 1, 0x04, 0x00000000 }, 661 { 0x419f2c, 1, 0x04, 0x00000000 }, 662 {} 663 }; 664 665 const struct gf100_gr_init 666 gf100_gr_init_be_0[] = { 667 { 0x40880c, 1, 0x04, 0x00000000 }, 668 { 0x408910, 9, 0x04, 0x00000000 }, 669 { 0x408950, 1, 0x04, 0x00000000 }, 670 { 0x408954, 1, 0x04, 0x0000ffff }, 671 { 0x408984, 1, 0x04, 0x00000000 }, 672 { 0x408988, 1, 0x04, 0x08040201 }, 673 { 0x40898c, 1, 0x04, 0x80402010 }, 674 {} 675 }; 676 677 const struct gf100_gr_init 678 gf100_gr_init_fe_1[] = { 679 { 0x4040f0, 1, 0x04, 0x00000000 }, 680 {} 681 }; 682 683 const struct gf100_gr_init 684 gf100_gr_init_pe_1[] = { 685 { 0x419880, 1, 0x04, 0x00000002 }, 686 {} 687 }; 688 689 static const struct gf100_gr_pack 690 gf100_gr_pack_mmio[] = { 691 { gf100_gr_init_main_0 }, 692 { gf100_gr_init_fe_0 }, 693 { gf100_gr_init_pri_0 }, 694 { gf100_gr_init_rstr2d_0 }, 695 { gf100_gr_init_pd_0 }, 696 { gf100_gr_init_ds_0 }, 697 { gf100_gr_init_scc_0 }, 698 { gf100_gr_init_prop_0 }, 699 { gf100_gr_init_gpc_unk_0 }, 700 { gf100_gr_init_setup_0 }, 701 { gf100_gr_init_crstr_0 }, 702 { gf100_gr_init_setup_1 }, 703 { gf100_gr_init_zcull_0 }, 704 { gf100_gr_init_gpm_0 }, 705 { gf100_gr_init_gpc_unk_1 }, 706 { gf100_gr_init_gcc_0 }, 707 { gf100_gr_init_tpccs_0 }, 708 { gf100_gr_init_tex_0 }, 709 { gf100_gr_init_pe_0 }, 710 { gf100_gr_init_l1c_0 }, 711 { gf100_gr_init_wwdx_0 }, 712 { gf100_gr_init_tpccs_1 }, 713 { gf100_gr_init_mpc_0 }, 714 { gf100_gr_init_sm_0 }, 715 { gf100_gr_init_be_0 }, 716 { gf100_gr_init_fe_1 }, 717 { gf100_gr_init_pe_1 }, 718 {} 719 }; 720 721 /******************************************************************************* 722 * PGRAPH engine/subdev functions 723 ******************************************************************************/ 724 725 static u32 726 gf100_gr_ctxsw_inst(struct nvkm_gr *gr) 727 { 728 return nvkm_rd32(gr->engine.subdev.device, 0x409b00); 729 } 730 731 static int 732 gf100_gr_fecs_ctrl_ctxsw(struct gf100_gr *gr, u32 mthd) 733 { 734 struct nvkm_device *device = gr->base.engine.subdev.device; 735 736 nvkm_wr32(device, 0x409804, 0xffffffff); 737 nvkm_wr32(device, 0x409840, 0xffffffff); 738 nvkm_wr32(device, 0x409500, 0xffffffff); 739 nvkm_wr32(device, 0x409504, mthd); 740 nvkm_msec(device, 2000, 741 u32 stat = nvkm_rd32(device, 0x409804); 742 if (stat == 0x00000002) 743 return -EIO; 744 if (stat == 0x00000001) 745 return 0; 746 ); 747 748 return -ETIMEDOUT; 749 } 750 751 static int 752 gf100_gr_fecs_start_ctxsw(struct nvkm_gr *base) 753 { 754 struct gf100_gr *gr = gf100_gr(base); 755 int ret = 0; 756 757 mutex_lock(&gr->fecs.mutex); 758 if (!--gr->fecs.disable) { 759 if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x39))) 760 gr->fecs.disable++; 761 } 762 mutex_unlock(&gr->fecs.mutex); 763 return ret; 764 } 765 766 static int 767 gf100_gr_fecs_stop_ctxsw(struct nvkm_gr *base) 768 { 769 struct gf100_gr *gr = gf100_gr(base); 770 int ret = 0; 771 772 mutex_lock(&gr->fecs.mutex); 773 if (!gr->fecs.disable++) { 774 if (WARN_ON(ret = gf100_gr_fecs_ctrl_ctxsw(gr, 0x38))) 775 gr->fecs.disable--; 776 } 777 mutex_unlock(&gr->fecs.mutex); 778 return ret; 779 } 780 781 int 782 gf100_gr_fecs_bind_pointer(struct gf100_gr *gr, u32 inst) 783 { 784 struct nvkm_device *device = gr->base.engine.subdev.device; 785 786 nvkm_wr32(device, 0x409840, 0x00000030); 787 nvkm_wr32(device, 0x409500, inst); 788 nvkm_wr32(device, 0x409504, 0x00000003); 789 nvkm_msec(device, 2000, 790 u32 stat = nvkm_rd32(device, 0x409800); 791 if (stat & 0x00000020) 792 return -EIO; 793 if (stat & 0x00000010) 794 return 0; 795 ); 796 797 return -ETIMEDOUT; 798 } 799 800 static int 801 gf100_gr_fecs_set_reglist_virtual_address(struct gf100_gr *gr, u64 addr) 802 { 803 struct nvkm_device *device = gr->base.engine.subdev.device; 804 805 nvkm_wr32(device, 0x409810, addr >> 8); 806 nvkm_wr32(device, 0x409800, 0x00000000); 807 nvkm_wr32(device, 0x409500, 0x00000001); 808 nvkm_wr32(device, 0x409504, 0x00000032); 809 nvkm_msec(device, 2000, 810 if (nvkm_rd32(device, 0x409800) == 0x00000001) 811 return 0; 812 ); 813 814 return -ETIMEDOUT; 815 } 816 817 static int 818 gf100_gr_fecs_set_reglist_bind_instance(struct gf100_gr *gr, u32 inst) 819 { 820 struct nvkm_device *device = gr->base.engine.subdev.device; 821 822 nvkm_wr32(device, 0x409810, inst); 823 nvkm_wr32(device, 0x409800, 0x00000000); 824 nvkm_wr32(device, 0x409500, 0x00000001); 825 nvkm_wr32(device, 0x409504, 0x00000031); 826 nvkm_msec(device, 2000, 827 if (nvkm_rd32(device, 0x409800) == 0x00000001) 828 return 0; 829 ); 830 831 return -ETIMEDOUT; 832 } 833 834 static int 835 gf100_gr_fecs_discover_reglist_image_size(struct gf100_gr *gr, u32 *psize) 836 { 837 struct nvkm_device *device = gr->base.engine.subdev.device; 838 839 nvkm_wr32(device, 0x409800, 0x00000000); 840 nvkm_wr32(device, 0x409500, 0x00000001); 841 nvkm_wr32(device, 0x409504, 0x00000030); 842 nvkm_msec(device, 2000, 843 if ((*psize = nvkm_rd32(device, 0x409800))) 844 return 0; 845 ); 846 847 return -ETIMEDOUT; 848 } 849 850 static int 851 gf100_gr_fecs_elpg_bind(struct gf100_gr *gr) 852 { 853 u32 size; 854 int ret; 855 856 ret = gf100_gr_fecs_discover_reglist_image_size(gr, &size); 857 if (ret) 858 return ret; 859 860 /*XXX: We need to allocate + map the above into PMU's inst block, 861 * which which means we probably need a proper PMU before we 862 * even bother. 863 */ 864 865 ret = gf100_gr_fecs_set_reglist_bind_instance(gr, 0); 866 if (ret) 867 return ret; 868 869 return gf100_gr_fecs_set_reglist_virtual_address(gr, 0); 870 } 871 872 static int 873 gf100_gr_fecs_discover_pm_image_size(struct gf100_gr *gr, u32 *psize) 874 { 875 struct nvkm_device *device = gr->base.engine.subdev.device; 876 877 nvkm_wr32(device, 0x409840, 0xffffffff); 878 nvkm_wr32(device, 0x409500, 0x00000000); 879 nvkm_wr32(device, 0x409504, 0x00000025); 880 nvkm_msec(device, 2000, 881 if ((*psize = nvkm_rd32(device, 0x409800))) 882 return 0; 883 ); 884 885 return -ETIMEDOUT; 886 } 887 888 static int 889 gf100_gr_fecs_discover_zcull_image_size(struct gf100_gr *gr, u32 *psize) 890 { 891 struct nvkm_device *device = gr->base.engine.subdev.device; 892 893 nvkm_wr32(device, 0x409840, 0xffffffff); 894 nvkm_wr32(device, 0x409500, 0x00000000); 895 nvkm_wr32(device, 0x409504, 0x00000016); 896 nvkm_msec(device, 2000, 897 if ((*psize = nvkm_rd32(device, 0x409800))) 898 return 0; 899 ); 900 901 return -ETIMEDOUT; 902 } 903 904 static int 905 gf100_gr_fecs_discover_image_size(struct gf100_gr *gr, u32 *psize) 906 { 907 struct nvkm_device *device = gr->base.engine.subdev.device; 908 909 nvkm_wr32(device, 0x409840, 0xffffffff); 910 nvkm_wr32(device, 0x409500, 0x00000000); 911 nvkm_wr32(device, 0x409504, 0x00000010); 912 nvkm_msec(device, 2000, 913 if ((*psize = nvkm_rd32(device, 0x409800))) 914 return 0; 915 ); 916 917 return -ETIMEDOUT; 918 } 919 920 static void 921 gf100_gr_fecs_set_watchdog_timeout(struct gf100_gr *gr, u32 timeout) 922 { 923 struct nvkm_device *device = gr->base.engine.subdev.device; 924 925 nvkm_wr32(device, 0x409840, 0xffffffff); 926 nvkm_wr32(device, 0x409500, timeout); 927 nvkm_wr32(device, 0x409504, 0x00000021); 928 } 929 930 static bool 931 gf100_gr_chsw_load(struct nvkm_gr *base) 932 { 933 struct gf100_gr *gr = gf100_gr(base); 934 if (!gr->firmware) { 935 u32 trace = nvkm_rd32(gr->base.engine.subdev.device, 0x40981c); 936 if (trace & 0x00000040) 937 return true; 938 } else { 939 u32 mthd = nvkm_rd32(gr->base.engine.subdev.device, 0x409808); 940 if (mthd & 0x00080000) 941 return true; 942 } 943 return false; 944 } 945 946 int 947 gf100_gr_rops(struct gf100_gr *gr) 948 { 949 struct nvkm_device *device = gr->base.engine.subdev.device; 950 return (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16; 951 } 952 953 void 954 gf100_gr_zbc_init(struct gf100_gr *gr) 955 { 956 const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 957 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 958 const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 959 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }; 960 const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 961 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; 962 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, 963 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; 964 struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc; 965 int index, c = ltc->zbc_min, d = ltc->zbc_min, s = ltc->zbc_min; 966 967 if (!gr->zbc_color[0].format) { 968 gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]); c++; 969 gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]); c++; 970 gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); c++; 971 gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); c++; 972 gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000); d++; 973 gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000); d++; 974 if (gr->func->zbc->stencil_get) { 975 gr->func->zbc->stencil_get(gr, 1, 0x00, 0x00); s++; 976 gr->func->zbc->stencil_get(gr, 1, 0x01, 0x01); s++; 977 gr->func->zbc->stencil_get(gr, 1, 0xff, 0xff); s++; 978 } 979 } 980 981 for (index = c; index <= ltc->zbc_max; index++) 982 gr->func->zbc->clear_color(gr, index); 983 for (index = d; index <= ltc->zbc_max; index++) 984 gr->func->zbc->clear_depth(gr, index); 985 986 if (gr->func->zbc->clear_stencil) { 987 for (index = s; index <= ltc->zbc_max; index++) 988 gr->func->zbc->clear_stencil(gr, index); 989 } 990 } 991 992 /** 993 * Wait until GR goes idle. GR is considered idle if it is disabled by the 994 * MC (0x200) register, or GR is not busy and a context switch is not in 995 * progress. 996 */ 997 int 998 gf100_gr_wait_idle(struct gf100_gr *gr) 999 { 1000 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1001 struct nvkm_device *device = subdev->device; 1002 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000); 1003 bool gr_enabled, ctxsw_active, gr_busy; 1004 1005 do { 1006 /* 1007 * required to make sure FIFO_ENGINE_STATUS (0x2640) is 1008 * up-to-date 1009 */ 1010 nvkm_rd32(device, 0x400700); 1011 1012 gr_enabled = nvkm_rd32(device, 0x200) & 0x1000; 1013 ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000; 1014 gr_busy = nvkm_rd32(device, 0x40060c) & 0x1; 1015 1016 if (!gr_enabled || (!gr_busy && !ctxsw_active)) 1017 return 0; 1018 } while (time_before(jiffies, end_jiffies)); 1019 1020 nvkm_error(subdev, 1021 "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n", 1022 gr_enabled, ctxsw_active, gr_busy); 1023 return -EAGAIN; 1024 } 1025 1026 void 1027 gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p) 1028 { 1029 struct nvkm_device *device = gr->base.engine.subdev.device; 1030 const struct gf100_gr_pack *pack; 1031 const struct gf100_gr_init *init; 1032 1033 pack_for_each_init(init, pack, p) { 1034 u32 next = init->addr + init->count * init->pitch; 1035 u32 addr = init->addr; 1036 while (addr < next) { 1037 nvkm_wr32(device, addr, init->data); 1038 addr += init->pitch; 1039 } 1040 } 1041 } 1042 1043 void 1044 gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p) 1045 { 1046 struct nvkm_device *device = gr->base.engine.subdev.device; 1047 const struct gf100_gr_pack *pack; 1048 const struct gf100_gr_init *init; 1049 u32 data = 0; 1050 1051 nvkm_wr32(device, 0x400208, 0x80000000); 1052 1053 pack_for_each_init(init, pack, p) { 1054 u32 next = init->addr + init->count * init->pitch; 1055 u32 addr = init->addr; 1056 1057 if ((pack == p && init == p->init) || data != init->data) { 1058 nvkm_wr32(device, 0x400204, init->data); 1059 data = init->data; 1060 } 1061 1062 while (addr < next) { 1063 nvkm_wr32(device, 0x400200, addr); 1064 /** 1065 * Wait for GR to go idle after submitting a 1066 * GO_IDLE bundle 1067 */ 1068 if ((addr & 0xffff) == 0xe100) 1069 gf100_gr_wait_idle(gr); 1070 nvkm_msec(device, 2000, 1071 if (!(nvkm_rd32(device, 0x400700) & 0x00000004)) 1072 break; 1073 ); 1074 addr += init->pitch; 1075 } 1076 } 1077 1078 nvkm_wr32(device, 0x400208, 0x00000000); 1079 } 1080 1081 void 1082 gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p) 1083 { 1084 struct nvkm_device *device = gr->base.engine.subdev.device; 1085 const struct gf100_gr_pack *pack; 1086 const struct gf100_gr_init *init; 1087 u32 data = 0; 1088 1089 pack_for_each_init(init, pack, p) { 1090 u32 ctrl = 0x80000000 | pack->type; 1091 u32 next = init->addr + init->count * init->pitch; 1092 u32 addr = init->addr; 1093 1094 if ((pack == p && init == p->init) || data != init->data) { 1095 nvkm_wr32(device, 0x40448c, init->data); 1096 data = init->data; 1097 } 1098 1099 while (addr < next) { 1100 nvkm_wr32(device, 0x404488, ctrl | (addr << 14)); 1101 addr += init->pitch; 1102 } 1103 } 1104 } 1105 1106 u64 1107 gf100_gr_units(struct nvkm_gr *base) 1108 { 1109 struct gf100_gr *gr = gf100_gr(base); 1110 u64 cfg; 1111 1112 cfg = (u32)gr->gpc_nr; 1113 cfg |= (u32)gr->tpc_total << 8; 1114 cfg |= (u64)gr->rop_nr << 32; 1115 1116 return cfg; 1117 } 1118 1119 static const struct nvkm_bitfield gf100_dispatch_error[] = { 1120 { 0x00000001, "INJECTED_BUNDLE_ERROR" }, 1121 { 0x00000002, "CLASS_SUBCH_MISMATCH" }, 1122 { 0x00000004, "SUBCHSW_DURING_NOTIFY" }, 1123 {} 1124 }; 1125 1126 static const struct nvkm_bitfield gf100_m2mf_error[] = { 1127 { 0x00000001, "PUSH_TOO_MUCH_DATA" }, 1128 { 0x00000002, "PUSH_NOT_ENOUGH_DATA" }, 1129 {} 1130 }; 1131 1132 static const struct nvkm_bitfield gf100_unk6_error[] = { 1133 { 0x00000001, "TEMP_TOO_SMALL" }, 1134 {} 1135 }; 1136 1137 static const struct nvkm_bitfield gf100_ccache_error[] = { 1138 { 0x00000001, "INTR" }, 1139 { 0x00000002, "LDCONST_OOB" }, 1140 {} 1141 }; 1142 1143 static const struct nvkm_bitfield gf100_macro_error[] = { 1144 { 0x00000001, "TOO_FEW_PARAMS" }, 1145 { 0x00000002, "TOO_MANY_PARAMS" }, 1146 { 0x00000004, "ILLEGAL_OPCODE" }, 1147 { 0x00000008, "DOUBLE_BRANCH" }, 1148 { 0x00000010, "WATCHDOG" }, 1149 {} 1150 }; 1151 1152 static const struct nvkm_bitfield gk104_sked_error[] = { 1153 { 0x00000040, "CTA_RESUME" }, 1154 { 0x00000080, "CONSTANT_BUFFER_SIZE" }, 1155 { 0x00000200, "LOCAL_MEMORY_SIZE_POS" }, 1156 { 0x00000400, "LOCAL_MEMORY_SIZE_NEG" }, 1157 { 0x00000800, "WARP_CSTACK_SIZE" }, 1158 { 0x00001000, "TOTAL_TEMP_SIZE" }, 1159 { 0x00002000, "REGISTER_COUNT" }, 1160 { 0x00040000, "TOTAL_THREADS" }, 1161 { 0x00100000, "PROGRAM_OFFSET" }, 1162 { 0x00200000, "SHARED_MEMORY_SIZE" }, 1163 { 0x00800000, "CTA_THREAD_DIMENSION_ZERO" }, 1164 { 0x01000000, "MEMORY_WINDOW_OVERLAP" }, 1165 { 0x02000000, "SHARED_CONFIG_TOO_SMALL" }, 1166 { 0x04000000, "TOTAL_REGISTER_COUNT" }, 1167 {} 1168 }; 1169 1170 static const struct nvkm_bitfield gf100_gpc_rop_error[] = { 1171 { 0x00000002, "RT_PITCH_OVERRUN" }, 1172 { 0x00000010, "RT_WIDTH_OVERRUN" }, 1173 { 0x00000020, "RT_HEIGHT_OVERRUN" }, 1174 { 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" }, 1175 { 0x00000100, "RT_STORAGE_TYPE_MISMATCH" }, 1176 { 0x00000400, "RT_LINEAR_MISMATCH" }, 1177 {} 1178 }; 1179 1180 static void 1181 gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) 1182 { 1183 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1184 struct nvkm_device *device = subdev->device; 1185 char error[128]; 1186 u32 trap[4]; 1187 1188 trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff; 1189 trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434)); 1190 trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438)); 1191 trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c)); 1192 1193 nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]); 1194 1195 nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, " 1196 "format = %x, storage type = %x\n", 1197 gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16, 1198 (trap[2] >> 8) & 0x3f, trap[3] & 0xff); 1199 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 1200 } 1201 1202 const struct nvkm_enum gf100_mp_warp_error[] = { 1203 { 0x01, "STACK_ERROR" }, 1204 { 0x02, "API_STACK_ERROR" }, 1205 { 0x03, "RET_EMPTY_STACK_ERROR" }, 1206 { 0x04, "PC_WRAP" }, 1207 { 0x05, "MISALIGNED_PC" }, 1208 { 0x06, "PC_OVERFLOW" }, 1209 { 0x07, "MISALIGNED_IMMC_ADDR" }, 1210 { 0x08, "MISALIGNED_REG" }, 1211 { 0x09, "ILLEGAL_INSTR_ENCODING" }, 1212 { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, 1213 { 0x0b, "ILLEGAL_INSTR_PARAM" }, 1214 { 0x0c, "INVALID_CONST_ADDR" }, 1215 { 0x0d, "OOR_REG" }, 1216 { 0x0e, "OOR_ADDR" }, 1217 { 0x0f, "MISALIGNED_ADDR" }, 1218 { 0x10, "INVALID_ADDR_SPACE" }, 1219 { 0x11, "ILLEGAL_INSTR_PARAM2" }, 1220 { 0x12, "INVALID_CONST_ADDR_LDC" }, 1221 { 0x13, "GEOMETRY_SM_ERROR" }, 1222 { 0x14, "DIVERGENT" }, 1223 { 0x15, "WARP_EXIT" }, 1224 {} 1225 }; 1226 1227 const struct nvkm_bitfield gf100_mp_global_error[] = { 1228 { 0x00000001, "SM_TO_SM_FAULT" }, 1229 { 0x00000002, "L1_ERROR" }, 1230 { 0x00000004, "MULTIPLE_WARP_ERRORS" }, 1231 { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, 1232 { 0x00000010, "BPT_INT" }, 1233 { 0x00000020, "BPT_PAUSE" }, 1234 { 0x00000040, "SINGLE_STEP_COMPLETE" }, 1235 { 0x20000000, "ECC_SEC_ERROR" }, 1236 { 0x40000000, "ECC_DED_ERROR" }, 1237 { 0x80000000, "TIMEOUT" }, 1238 {} 1239 }; 1240 1241 void 1242 gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc) 1243 { 1244 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1245 struct nvkm_device *device = subdev->device; 1246 u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648)); 1247 u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650)); 1248 const struct nvkm_enum *warp; 1249 char glob[128]; 1250 1251 nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr); 1252 warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff); 1253 1254 nvkm_error(subdev, "GPC%i/TPC%i/MP trap: " 1255 "global %08x [%s] warp %04x [%s]\n", 1256 gpc, tpc, gerr, glob, werr, warp ? warp->name : ""); 1257 1258 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000); 1259 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr); 1260 } 1261 1262 static void 1263 gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc) 1264 { 1265 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1266 struct nvkm_device *device = subdev->device; 1267 u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508)); 1268 1269 if (stat & 0x00000001) { 1270 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224)); 1271 nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap); 1272 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000); 1273 stat &= ~0x00000001; 1274 } 1275 1276 if (stat & 0x00000002) { 1277 gr->func->trap_mp(gr, gpc, tpc); 1278 stat &= ~0x00000002; 1279 } 1280 1281 if (stat & 0x00000004) { 1282 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084)); 1283 nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap); 1284 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000); 1285 stat &= ~0x00000004; 1286 } 1287 1288 if (stat & 0x00000008) { 1289 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c)); 1290 nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap); 1291 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000); 1292 stat &= ~0x00000008; 1293 } 1294 1295 if (stat & 0x00000010) { 1296 u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0430)); 1297 nvkm_error(subdev, "GPC%d/TPC%d/MPC: %08x\n", gpc, tpc, trap); 1298 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0430), 0xc0000000); 1299 stat &= ~0x00000010; 1300 } 1301 1302 if (stat) { 1303 nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat); 1304 } 1305 } 1306 1307 static void 1308 gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc) 1309 { 1310 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1311 struct nvkm_device *device = subdev->device; 1312 u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90)); 1313 int tpc; 1314 1315 if (stat & 0x00000001) { 1316 gf100_gr_trap_gpc_rop(gr, gpc); 1317 stat &= ~0x00000001; 1318 } 1319 1320 if (stat & 0x00000002) { 1321 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900)); 1322 nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap); 1323 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); 1324 stat &= ~0x00000002; 1325 } 1326 1327 if (stat & 0x00000004) { 1328 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028)); 1329 nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap); 1330 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); 1331 stat &= ~0x00000004; 1332 } 1333 1334 if (stat & 0x00000008) { 1335 u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824)); 1336 nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap); 1337 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); 1338 stat &= ~0x00000009; 1339 } 1340 1341 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { 1342 u32 mask = 0x00010000 << tpc; 1343 if (stat & mask) { 1344 gf100_gr_trap_tpc(gr, gpc, tpc); 1345 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask); 1346 stat &= ~mask; 1347 } 1348 } 1349 1350 if (stat) { 1351 nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat); 1352 } 1353 } 1354 1355 static void 1356 gf100_gr_trap_intr(struct gf100_gr *gr) 1357 { 1358 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1359 struct nvkm_device *device = subdev->device; 1360 char error[128]; 1361 u32 trap = nvkm_rd32(device, 0x400108); 1362 int rop, gpc; 1363 1364 if (trap & 0x00000001) { 1365 u32 stat = nvkm_rd32(device, 0x404000); 1366 1367 nvkm_snprintbf(error, sizeof(error), gf100_dispatch_error, 1368 stat & 0x3fffffff); 1369 nvkm_error(subdev, "DISPATCH %08x [%s]\n", stat, error); 1370 nvkm_wr32(device, 0x404000, 0xc0000000); 1371 nvkm_wr32(device, 0x400108, 0x00000001); 1372 trap &= ~0x00000001; 1373 } 1374 1375 if (trap & 0x00000002) { 1376 u32 stat = nvkm_rd32(device, 0x404600); 1377 1378 nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error, 1379 stat & 0x3fffffff); 1380 nvkm_error(subdev, "M2MF %08x [%s]\n", stat, error); 1381 1382 nvkm_wr32(device, 0x404600, 0xc0000000); 1383 nvkm_wr32(device, 0x400108, 0x00000002); 1384 trap &= ~0x00000002; 1385 } 1386 1387 if (trap & 0x00000008) { 1388 u32 stat = nvkm_rd32(device, 0x408030); 1389 1390 nvkm_snprintbf(error, sizeof(error), gf100_ccache_error, 1391 stat & 0x3fffffff); 1392 nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error); 1393 nvkm_wr32(device, 0x408030, 0xc0000000); 1394 nvkm_wr32(device, 0x400108, 0x00000008); 1395 trap &= ~0x00000008; 1396 } 1397 1398 if (trap & 0x00000010) { 1399 u32 stat = nvkm_rd32(device, 0x405840); 1400 nvkm_error(subdev, "SHADER %08x, sph: 0x%06x, stage: 0x%02x\n", 1401 stat, stat & 0xffffff, (stat >> 24) & 0x3f); 1402 nvkm_wr32(device, 0x405840, 0xc0000000); 1403 nvkm_wr32(device, 0x400108, 0x00000010); 1404 trap &= ~0x00000010; 1405 } 1406 1407 if (trap & 0x00000040) { 1408 u32 stat = nvkm_rd32(device, 0x40601c); 1409 1410 nvkm_snprintbf(error, sizeof(error), gf100_unk6_error, 1411 stat & 0x3fffffff); 1412 nvkm_error(subdev, "UNK6 %08x [%s]\n", stat, error); 1413 1414 nvkm_wr32(device, 0x40601c, 0xc0000000); 1415 nvkm_wr32(device, 0x400108, 0x00000040); 1416 trap &= ~0x00000040; 1417 } 1418 1419 if (trap & 0x00000080) { 1420 u32 stat = nvkm_rd32(device, 0x404490); 1421 u32 pc = nvkm_rd32(device, 0x404494); 1422 u32 op = nvkm_rd32(device, 0x40449c); 1423 1424 nvkm_snprintbf(error, sizeof(error), gf100_macro_error, 1425 stat & 0x1fffffff); 1426 nvkm_error(subdev, "MACRO %08x [%s], pc: 0x%03x%s, op: 0x%08x\n", 1427 stat, error, pc & 0x7ff, 1428 (pc & 0x10000000) ? "" : " (invalid)", 1429 op); 1430 1431 nvkm_wr32(device, 0x404490, 0xc0000000); 1432 nvkm_wr32(device, 0x400108, 0x00000080); 1433 trap &= ~0x00000080; 1434 } 1435 1436 if (trap & 0x00000100) { 1437 u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff; 1438 1439 nvkm_snprintbf(error, sizeof(error), gk104_sked_error, stat); 1440 nvkm_error(subdev, "SKED: %08x [%s]\n", stat, error); 1441 1442 if (stat) 1443 nvkm_wr32(device, 0x407020, 0x40000000); 1444 nvkm_wr32(device, 0x400108, 0x00000100); 1445 trap &= ~0x00000100; 1446 } 1447 1448 if (trap & 0x01000000) { 1449 u32 stat = nvkm_rd32(device, 0x400118); 1450 for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) { 1451 u32 mask = 0x00000001 << gpc; 1452 if (stat & mask) { 1453 gf100_gr_trap_gpc(gr, gpc); 1454 nvkm_wr32(device, 0x400118, mask); 1455 stat &= ~mask; 1456 } 1457 } 1458 nvkm_wr32(device, 0x400108, 0x01000000); 1459 trap &= ~0x01000000; 1460 } 1461 1462 if (trap & 0x02000000) { 1463 for (rop = 0; rop < gr->rop_nr; rop++) { 1464 u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070)); 1465 u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144)); 1466 nvkm_error(subdev, "ROP%d %08x %08x\n", 1467 rop, statz, statc); 1468 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000); 1469 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000); 1470 } 1471 nvkm_wr32(device, 0x400108, 0x02000000); 1472 trap &= ~0x02000000; 1473 } 1474 1475 if (trap) { 1476 nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap); 1477 nvkm_wr32(device, 0x400108, trap); 1478 } 1479 } 1480 1481 static void 1482 gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base) 1483 { 1484 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1485 struct nvkm_device *device = subdev->device; 1486 nvkm_error(subdev, "%06x - done %08x\n", base, 1487 nvkm_rd32(device, base + 0x400)); 1488 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base, 1489 nvkm_rd32(device, base + 0x800), 1490 nvkm_rd32(device, base + 0x804), 1491 nvkm_rd32(device, base + 0x808), 1492 nvkm_rd32(device, base + 0x80c)); 1493 nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base, 1494 nvkm_rd32(device, base + 0x810), 1495 nvkm_rd32(device, base + 0x814), 1496 nvkm_rd32(device, base + 0x818), 1497 nvkm_rd32(device, base + 0x81c)); 1498 } 1499 1500 void 1501 gf100_gr_ctxctl_debug(struct gf100_gr *gr) 1502 { 1503 struct nvkm_device *device = gr->base.engine.subdev.device; 1504 u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff; 1505 u32 gpc; 1506 1507 gf100_gr_ctxctl_debug_unit(gr, 0x409000); 1508 for (gpc = 0; gpc < gpcnr; gpc++) 1509 gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000)); 1510 } 1511 1512 static void 1513 gf100_gr_ctxctl_isr(struct gf100_gr *gr) 1514 { 1515 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1516 struct nvkm_device *device = subdev->device; 1517 u32 stat = nvkm_rd32(device, 0x409c18); 1518 1519 if (!gr->firmware && (stat & 0x00000001)) { 1520 u32 code = nvkm_rd32(device, 0x409814); 1521 if (code == E_BAD_FWMTHD) { 1522 u32 class = nvkm_rd32(device, 0x409808); 1523 u32 addr = nvkm_rd32(device, 0x40980c); 1524 u32 subc = (addr & 0x00070000) >> 16; 1525 u32 mthd = (addr & 0x00003ffc); 1526 u32 data = nvkm_rd32(device, 0x409810); 1527 1528 nvkm_error(subdev, "FECS MTHD subc %d class %04x " 1529 "mthd %04x data %08x\n", 1530 subc, class, mthd, data); 1531 } else { 1532 nvkm_error(subdev, "FECS ucode error %d\n", code); 1533 } 1534 nvkm_wr32(device, 0x409c20, 0x00000001); 1535 stat &= ~0x00000001; 1536 } 1537 1538 if (!gr->firmware && (stat & 0x00080000)) { 1539 nvkm_error(subdev, "FECS watchdog timeout\n"); 1540 gf100_gr_ctxctl_debug(gr); 1541 nvkm_wr32(device, 0x409c20, 0x00080000); 1542 stat &= ~0x00080000; 1543 } 1544 1545 if (stat) { 1546 nvkm_error(subdev, "FECS %08x\n", stat); 1547 gf100_gr_ctxctl_debug(gr); 1548 nvkm_wr32(device, 0x409c20, stat); 1549 } 1550 } 1551 1552 static void 1553 gf100_gr_intr(struct nvkm_gr *base) 1554 { 1555 struct gf100_gr *gr = gf100_gr(base); 1556 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1557 struct nvkm_device *device = subdev->device; 1558 struct nvkm_fifo_chan *chan; 1559 unsigned long flags; 1560 u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff; 1561 u32 stat = nvkm_rd32(device, 0x400100); 1562 u32 addr = nvkm_rd32(device, 0x400704); 1563 u32 mthd = (addr & 0x00003ffc); 1564 u32 subc = (addr & 0x00070000) >> 16; 1565 u32 data = nvkm_rd32(device, 0x400708); 1566 u32 code = nvkm_rd32(device, 0x400110); 1567 u32 class; 1568 const char *name = "unknown"; 1569 int chid = -1; 1570 1571 chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags); 1572 if (chan) { 1573 name = chan->object.client->name; 1574 chid = chan->chid; 1575 } 1576 1577 if (device->card_type < NV_E0 || subc < 4) 1578 class = nvkm_rd32(device, 0x404200 + (subc * 4)); 1579 else 1580 class = 0x0000; 1581 1582 if (stat & 0x00000001) { 1583 /* 1584 * notifier interrupt, only needed for cyclestats 1585 * can be safely ignored 1586 */ 1587 nvkm_wr32(device, 0x400100, 0x00000001); 1588 stat &= ~0x00000001; 1589 } 1590 1591 if (stat & 0x00000010) { 1592 if (!gf100_gr_mthd_sw(device, class, mthd, data)) { 1593 nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010"PRIx64" %s] " 1594 "subc %d class %04x mthd %04x data %08x\n", 1595 chid, inst << 12, name, subc, 1596 class, mthd, data); 1597 } 1598 nvkm_wr32(device, 0x400100, 0x00000010); 1599 stat &= ~0x00000010; 1600 } 1601 1602 if (stat & 0x00000020) { 1603 nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010"PRIx64" %s] " 1604 "subc %d class %04x mthd %04x data %08x\n", 1605 chid, inst << 12, name, subc, class, mthd, data); 1606 nvkm_wr32(device, 0x400100, 0x00000020); 1607 stat &= ~0x00000020; 1608 } 1609 1610 if (stat & 0x00100000) { 1611 const struct nvkm_enum *en = 1612 nvkm_enum_find(nv50_data_error_names, code); 1613 nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010"PRIx64" %s] " 1614 "subc %d class %04x mthd %04x data %08x\n", 1615 code, en ? en->name : "", chid, inst << 12, 1616 name, subc, class, mthd, data); 1617 nvkm_wr32(device, 0x400100, 0x00100000); 1618 stat &= ~0x00100000; 1619 } 1620 1621 if (stat & 0x00200000) { 1622 nvkm_error(subdev, "TRAP ch %d [%010"PRIx64" %s]\n", 1623 chid, inst << 12, name); 1624 gf100_gr_trap_intr(gr); 1625 nvkm_wr32(device, 0x400100, 0x00200000); 1626 stat &= ~0x00200000; 1627 } 1628 1629 if (stat & 0x00080000) { 1630 gf100_gr_ctxctl_isr(gr); 1631 nvkm_wr32(device, 0x400100, 0x00080000); 1632 stat &= ~0x00080000; 1633 } 1634 1635 if (stat) { 1636 nvkm_error(subdev, "intr %08x\n", stat); 1637 nvkm_wr32(device, 0x400100, stat); 1638 } 1639 1640 nvkm_wr32(device, 0x400500, 0x00010001); 1641 nvkm_fifo_chan_put(device->fifo, flags, &chan); 1642 } 1643 1644 static void 1645 gf100_gr_init_fw(struct nvkm_falcon *falcon, 1646 struct nvkm_blob *code, struct nvkm_blob *data) 1647 { 1648 nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0); 1649 nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false); 1650 } 1651 1652 static void 1653 gf100_gr_init_csdata(struct gf100_gr *gr, 1654 const struct gf100_gr_pack *pack, 1655 u32 falcon, u32 starstar, u32 base) 1656 { 1657 struct nvkm_device *device = gr->base.engine.subdev.device; 1658 const struct gf100_gr_pack *iter; 1659 const struct gf100_gr_init *init; 1660 u32 addr = ~0, prev = ~0, xfer = 0; 1661 u32 star, temp; 1662 1663 nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar); 1664 star = nvkm_rd32(device, falcon + 0x01c4); 1665 temp = nvkm_rd32(device, falcon + 0x01c4); 1666 if (temp > star) 1667 star = temp; 1668 nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star); 1669 1670 pack_for_each_init(init, iter, pack) { 1671 u32 head = init->addr - base; 1672 u32 tail = head + init->count * init->pitch; 1673 while (head < tail) { 1674 if (head != prev + 4 || xfer >= 32) { 1675 if (xfer) { 1676 u32 data = ((--xfer << 26) | addr); 1677 nvkm_wr32(device, falcon + 0x01c4, data); 1678 star += 4; 1679 } 1680 addr = head; 1681 xfer = 0; 1682 } 1683 prev = head; 1684 xfer = xfer + 1; 1685 head = head + init->pitch; 1686 } 1687 } 1688 1689 nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr); 1690 nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar); 1691 nvkm_wr32(device, falcon + 0x01c4, star + 4); 1692 } 1693 1694 /* Initialize context from an external (secure or not) firmware */ 1695 static int 1696 gf100_gr_init_ctxctl_ext(struct gf100_gr *gr) 1697 { 1698 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1699 struct nvkm_device *device = subdev->device; 1700 u32 lsf_mask = 0; 1701 int ret; 1702 1703 /* load fuc microcode */ 1704 nvkm_mc_unk260(device, 0); 1705 1706 /* securely-managed falcons must be reset using secure boot */ 1707 1708 if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_FECS)) { 1709 gf100_gr_init_fw(&gr->fecs.falcon, &gr->fecs.inst, 1710 &gr->fecs.data); 1711 } else { 1712 lsf_mask |= BIT(NVKM_ACR_LSF_FECS); 1713 } 1714 1715 if (!nvkm_acr_managed_falcon(device, NVKM_ACR_LSF_GPCCS)) { 1716 gf100_gr_init_fw(&gr->gpccs.falcon, &gr->gpccs.inst, 1717 &gr->gpccs.data); 1718 } else { 1719 lsf_mask |= BIT(NVKM_ACR_LSF_GPCCS); 1720 } 1721 1722 if (lsf_mask) { 1723 ret = nvkm_acr_bootstrap_falcons(device, lsf_mask); 1724 if (ret) 1725 return ret; 1726 } 1727 1728 nvkm_mc_unk260(device, 1); 1729 1730 /* start both of them running */ 1731 nvkm_wr32(device, 0x409840, 0xffffffff); 1732 nvkm_wr32(device, 0x41a10c, 0x00000000); 1733 nvkm_wr32(device, 0x40910c, 0x00000000); 1734 1735 nvkm_falcon_start(&gr->gpccs.falcon); 1736 nvkm_falcon_start(&gr->fecs.falcon); 1737 1738 if (nvkm_msec(device, 2000, 1739 if (nvkm_rd32(device, 0x409800) & 0x00000001) 1740 break; 1741 ) < 0) 1742 return -EBUSY; 1743 1744 gf100_gr_fecs_set_watchdog_timeout(gr, 0x7fffffff); 1745 1746 /* Determine how much memory is required to store main context image. */ 1747 ret = gf100_gr_fecs_discover_image_size(gr, &gr->size); 1748 if (ret) 1749 return ret; 1750 1751 /* Determine how much memory is required to store ZCULL image. */ 1752 ret = gf100_gr_fecs_discover_zcull_image_size(gr, &gr->size_zcull); 1753 if (ret) 1754 return ret; 1755 1756 /* Determine how much memory is required to store PerfMon image. */ 1757 ret = gf100_gr_fecs_discover_pm_image_size(gr, &gr->size_pm); 1758 if (ret) 1759 return ret; 1760 1761 /*XXX: We (likely) require PMU support to even bother with this. 1762 * 1763 * Also, it seems like not all GPUs support ELPG. Traces I 1764 * have here show RM enabling it on Kepler/Turing, but none 1765 * of the GPUs between those. NVGPU decides this by PCIID. 1766 */ 1767 if (0) { 1768 ret = gf100_gr_fecs_elpg_bind(gr); 1769 if (ret) 1770 return ret; 1771 } 1772 1773 /* Generate golden context image. */ 1774 if (gr->data == NULL) { 1775 int ret = gf100_grctx_generate(gr); 1776 if (ret) { 1777 nvkm_error(subdev, "failed to construct context\n"); 1778 return ret; 1779 } 1780 } 1781 1782 return 0; 1783 } 1784 1785 static int 1786 gf100_gr_init_ctxctl_int(struct gf100_gr *gr) 1787 { 1788 const struct gf100_grctx_func *grctx = gr->func->grctx; 1789 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1790 struct nvkm_device *device = subdev->device; 1791 1792 if (!gr->func->fecs.ucode) { 1793 return -ENOSYS; 1794 } 1795 1796 /* load HUB microcode */ 1797 nvkm_mc_unk260(device, 0); 1798 nvkm_falcon_load_dmem(&gr->fecs.falcon, 1799 gr->func->fecs.ucode->data.data, 0x0, 1800 gr->func->fecs.ucode->data.size, 0); 1801 nvkm_falcon_load_imem(&gr->fecs.falcon, 1802 gr->func->fecs.ucode->code.data, 0x0, 1803 gr->func->fecs.ucode->code.size, 0, 0, false); 1804 1805 /* load GPC microcode */ 1806 nvkm_falcon_load_dmem(&gr->gpccs.falcon, 1807 gr->func->gpccs.ucode->data.data, 0x0, 1808 gr->func->gpccs.ucode->data.size, 0); 1809 nvkm_falcon_load_imem(&gr->gpccs.falcon, 1810 gr->func->gpccs.ucode->code.data, 0x0, 1811 gr->func->gpccs.ucode->code.size, 0, 0, false); 1812 nvkm_mc_unk260(device, 1); 1813 1814 /* load register lists */ 1815 gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000); 1816 gf100_gr_init_csdata(gr, grctx->gpc_0, 0x41a000, 0x000, 0x418000); 1817 gf100_gr_init_csdata(gr, grctx->gpc_1, 0x41a000, 0x000, 0x418000); 1818 gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800); 1819 gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00); 1820 1821 /* start HUB ucode running, it'll init the GPCs */ 1822 nvkm_wr32(device, 0x40910c, 0x00000000); 1823 nvkm_wr32(device, 0x409100, 0x00000002); 1824 if (nvkm_msec(device, 2000, 1825 if (nvkm_rd32(device, 0x409800) & 0x80000000) 1826 break; 1827 ) < 0) { 1828 gf100_gr_ctxctl_debug(gr); 1829 return -EBUSY; 1830 } 1831 1832 gr->size = nvkm_rd32(device, 0x409804); 1833 if (gr->data == NULL) { 1834 int ret = gf100_grctx_generate(gr); 1835 if (ret) { 1836 nvkm_error(subdev, "failed to construct context\n"); 1837 return ret; 1838 } 1839 } 1840 1841 return 0; 1842 } 1843 1844 int 1845 gf100_gr_init_ctxctl(struct gf100_gr *gr) 1846 { 1847 int ret; 1848 1849 if (gr->firmware) 1850 ret = gf100_gr_init_ctxctl_ext(gr); 1851 else 1852 ret = gf100_gr_init_ctxctl_int(gr); 1853 1854 return ret; 1855 } 1856 1857 void 1858 gf100_gr_oneinit_sm_id(struct gf100_gr *gr) 1859 { 1860 int tpc, gpc; 1861 for (tpc = 0; tpc < gr->tpc_max; tpc++) { 1862 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 1863 if (tpc < gr->tpc_nr[gpc]) { 1864 gr->sm[gr->sm_nr].gpc = gpc; 1865 gr->sm[gr->sm_nr].tpc = tpc; 1866 gr->sm_nr++; 1867 } 1868 } 1869 } 1870 } 1871 1872 void 1873 gf100_gr_oneinit_tiles(struct gf100_gr *gr) 1874 { 1875 static const u8 primes[] = { 1876 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61 1877 }; 1878 int init_frac[GPC_MAX], init_err[GPC_MAX], run_err[GPC_MAX], i, j; 1879 u32 mul_factor, comm_denom; 1880 u8 gpc_map[GPC_MAX]; 1881 bool sorted; 1882 1883 switch (gr->tpc_total) { 1884 case 15: gr->screen_tile_row_offset = 0x06; break; 1885 case 14: gr->screen_tile_row_offset = 0x05; break; 1886 case 13: gr->screen_tile_row_offset = 0x02; break; 1887 case 11: gr->screen_tile_row_offset = 0x07; break; 1888 case 10: gr->screen_tile_row_offset = 0x06; break; 1889 case 7: 1890 case 5: gr->screen_tile_row_offset = 0x01; break; 1891 case 3: gr->screen_tile_row_offset = 0x02; break; 1892 case 2: 1893 case 1: gr->screen_tile_row_offset = 0x01; break; 1894 default: gr->screen_tile_row_offset = 0x03; 1895 for (i = 0; i < ARRAY_SIZE(primes); i++) { 1896 if (gr->tpc_total % primes[i]) { 1897 gr->screen_tile_row_offset = primes[i]; 1898 break; 1899 } 1900 } 1901 break; 1902 } 1903 1904 /* Sort GPCs by TPC count, highest-to-lowest. */ 1905 for (i = 0; i < gr->gpc_nr; i++) 1906 gpc_map[i] = i; 1907 sorted = false; 1908 1909 while (!sorted) { 1910 for (sorted = true, i = 0; i < gr->gpc_nr - 1; i++) { 1911 if (gr->tpc_nr[gpc_map[i + 1]] > 1912 gr->tpc_nr[gpc_map[i + 0]]) { 1913 u8 swap = gpc_map[i]; 1914 gpc_map[i + 0] = gpc_map[i + 1]; 1915 gpc_map[i + 1] = swap; 1916 sorted = false; 1917 } 1918 } 1919 } 1920 1921 /* Determine tile->GPC mapping */ 1922 mul_factor = gr->gpc_nr * gr->tpc_max; 1923 if (mul_factor & 1) 1924 mul_factor = 2; 1925 else 1926 mul_factor = 1; 1927 1928 comm_denom = gr->gpc_nr * gr->tpc_max * mul_factor; 1929 1930 for (i = 0; i < gr->gpc_nr; i++) { 1931 init_frac[i] = gr->tpc_nr[gpc_map[i]] * gr->gpc_nr * mul_factor; 1932 init_err[i] = i * gr->tpc_max * mul_factor - comm_denom/2; 1933 run_err[i] = init_frac[i] + init_err[i]; 1934 } 1935 1936 for (i = 0; i < gr->tpc_total;) { 1937 for (j = 0; j < gr->gpc_nr; j++) { 1938 if ((run_err[j] * 2) >= comm_denom) { 1939 gr->tile[i++] = gpc_map[j]; 1940 run_err[j] += init_frac[j] - comm_denom; 1941 } else { 1942 run_err[j] += init_frac[j]; 1943 } 1944 } 1945 } 1946 } 1947 1948 static int 1949 gf100_gr_oneinit(struct nvkm_gr *base) 1950 { 1951 struct gf100_gr *gr = gf100_gr(base); 1952 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 1953 struct nvkm_device *device = subdev->device; 1954 int i, j; 1955 1956 nvkm_pmu_pgob(device->pmu, false); 1957 1958 gr->rop_nr = gr->func->rops(gr); 1959 gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f; 1960 for (i = 0; i < gr->gpc_nr; i++) { 1961 gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608)); 1962 gr->tpc_max = max(gr->tpc_max, gr->tpc_nr[i]); 1963 gr->tpc_total += gr->tpc_nr[i]; 1964 gr->ppc_nr[i] = gr->func->ppc_nr; 1965 for (j = 0; j < gr->ppc_nr[i]; j++) { 1966 gr->ppc_tpc_mask[i][j] = 1967 nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); 1968 if (gr->ppc_tpc_mask[i][j] == 0) 1969 continue; 1970 gr->ppc_mask[i] |= (1 << j); 1971 gr->ppc_tpc_nr[i][j] = hweight8(gr->ppc_tpc_mask[i][j]); 1972 if (gr->ppc_tpc_min == 0 || 1973 gr->ppc_tpc_min > gr->ppc_tpc_nr[i][j]) 1974 gr->ppc_tpc_min = gr->ppc_tpc_nr[i][j]; 1975 if (gr->ppc_tpc_max < gr->ppc_tpc_nr[i][j]) 1976 gr->ppc_tpc_max = gr->ppc_tpc_nr[i][j]; 1977 } 1978 } 1979 1980 memset(gr->tile, 0xff, sizeof(gr->tile)); 1981 gr->func->oneinit_tiles(gr); 1982 gr->func->oneinit_sm_id(gr); 1983 return 0; 1984 } 1985 1986 static int 1987 gf100_gr_init_(struct nvkm_gr *base) 1988 { 1989 struct gf100_gr *gr = gf100_gr(base); 1990 struct nvkm_subdev *subdev = &base->engine.subdev; 1991 u32 ret; 1992 1993 nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); 1994 1995 ret = nvkm_falcon_get(&gr->fecs.falcon, subdev); 1996 if (ret) 1997 return ret; 1998 1999 ret = nvkm_falcon_get(&gr->gpccs.falcon, subdev); 2000 if (ret) 2001 return ret; 2002 2003 return gr->func->init(gr); 2004 } 2005 2006 static int 2007 gf100_gr_fini(struct nvkm_gr *base, bool suspend) 2008 { 2009 struct gf100_gr *gr = gf100_gr(base); 2010 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 2011 nvkm_falcon_put(&gr->gpccs.falcon, subdev); 2012 nvkm_falcon_put(&gr->fecs.falcon, subdev); 2013 return 0; 2014 } 2015 2016 static void * 2017 gf100_gr_dtor(struct nvkm_gr *base) 2018 { 2019 struct gf100_gr *gr = gf100_gr(base); 2020 2021 kfree(gr->data); 2022 2023 nvkm_falcon_dtor(&gr->gpccs.falcon); 2024 nvkm_falcon_dtor(&gr->fecs.falcon); 2025 2026 nvkm_blob_dtor(&gr->fecs.inst); 2027 nvkm_blob_dtor(&gr->fecs.data); 2028 nvkm_blob_dtor(&gr->gpccs.inst); 2029 nvkm_blob_dtor(&gr->gpccs.data); 2030 2031 vfree(gr->bundle); 2032 vfree(gr->method); 2033 vfree(gr->sw_ctx); 2034 vfree(gr->sw_nonctx); 2035 2036 mutex_destroy(&gr->fecs.mutex); 2037 2038 return gr; 2039 } 2040 2041 static const struct nvkm_gr_func 2042 gf100_gr_ = { 2043 .dtor = gf100_gr_dtor, 2044 .oneinit = gf100_gr_oneinit, 2045 .init = gf100_gr_init_, 2046 .fini = gf100_gr_fini, 2047 .intr = gf100_gr_intr, 2048 .units = gf100_gr_units, 2049 .chan_new = gf100_gr_chan_new, 2050 .object_get = gf100_gr_object_get, 2051 .chsw_load = gf100_gr_chsw_load, 2052 .ctxsw.pause = gf100_gr_fecs_stop_ctxsw, 2053 .ctxsw.resume = gf100_gr_fecs_start_ctxsw, 2054 .ctxsw.inst = gf100_gr_ctxsw_inst, 2055 }; 2056 2057 static const struct nvkm_falcon_func 2058 gf100_gr_flcn = { 2059 .fbif = 0x600, 2060 .load_imem = nvkm_falcon_v1_load_imem, 2061 .load_dmem = nvkm_falcon_v1_load_dmem, 2062 .read_dmem = nvkm_falcon_v1_read_dmem, 2063 .bind_context = nvkm_falcon_v1_bind_context, 2064 .wait_for_halt = nvkm_falcon_v1_wait_for_halt, 2065 .clear_interrupt = nvkm_falcon_v1_clear_interrupt, 2066 .set_start_addr = nvkm_falcon_v1_set_start_addr, 2067 .start = nvkm_falcon_v1_start, 2068 .enable = nvkm_falcon_v1_enable, 2069 .disable = nvkm_falcon_v1_disable, 2070 }; 2071 2072 int 2073 gf100_gr_new_(const struct gf100_gr_fwif *fwif, 2074 struct nvkm_device *device, int index, struct nvkm_gr **pgr) 2075 { 2076 struct gf100_gr *gr; 2077 int ret; 2078 2079 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) 2080 return -ENOMEM; 2081 *pgr = &gr->base; 2082 2083 ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base); 2084 if (ret) 2085 return ret; 2086 2087 fwif = nvkm_firmware_load(&gr->base.engine.subdev, fwif, "Gr", gr); 2088 if (IS_ERR(fwif)) 2089 return -ENODEV; 2090 2091 gr->func = fwif->func; 2092 2093 ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, 2094 "fecs", 0x409000, &gr->fecs.falcon); 2095 if (ret) 2096 return ret; 2097 2098 mutex_init(&gr->fecs.mutex); 2099 2100 ret = nvkm_falcon_ctor(&gf100_gr_flcn, &gr->base.engine.subdev, 2101 "gpccs", 0x41a000, &gr->gpccs.falcon); 2102 if (ret) 2103 return ret; 2104 2105 return 0; 2106 } 2107 2108 void 2109 gf100_gr_init_num_tpc_per_gpc(struct gf100_gr *gr, bool pd, bool ds) 2110 { 2111 struct nvkm_device *device = gr->base.engine.subdev.device; 2112 int gpc, i, j; 2113 u32 data; 2114 2115 for (gpc = 0, i = 0; i < 4; i++) { 2116 for (data = 0, j = 0; j < 8 && gpc < gr->gpc_nr; j++, gpc++) 2117 data |= gr->tpc_nr[gpc] << (j * 4); 2118 if (pd) 2119 nvkm_wr32(device, 0x406028 + (i * 4), data); 2120 if (ds) 2121 nvkm_wr32(device, 0x405870 + (i * 4), data); 2122 } 2123 } 2124 2125 void 2126 gf100_gr_init_400054(struct gf100_gr *gr) 2127 { 2128 nvkm_wr32(gr->base.engine.subdev.device, 0x400054, 0x34ce3464); 2129 } 2130 2131 void 2132 gf100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc) 2133 { 2134 struct nvkm_device *device = gr->base.engine.subdev.device; 2135 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); 2136 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); 2137 } 2138 2139 void 2140 gf100_gr_init_tex_hww_esr(struct gf100_gr *gr, int gpc, int tpc) 2141 { 2142 struct nvkm_device *device = gr->base.engine.subdev.device; 2143 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); 2144 } 2145 2146 void 2147 gf100_gr_init_419eb4(struct gf100_gr *gr) 2148 { 2149 struct nvkm_device *device = gr->base.engine.subdev.device; 2150 nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000); 2151 } 2152 2153 void 2154 gf100_gr_init_419cc0(struct gf100_gr *gr) 2155 { 2156 struct nvkm_device *device = gr->base.engine.subdev.device; 2157 int gpc, tpc; 2158 2159 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); 2160 2161 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 2162 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) 2163 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); 2164 } 2165 } 2166 2167 void 2168 gf100_gr_init_40601c(struct gf100_gr *gr) 2169 { 2170 nvkm_wr32(gr->base.engine.subdev.device, 0x40601c, 0xc0000000); 2171 } 2172 2173 void 2174 gf100_gr_init_fecs_exceptions(struct gf100_gr *gr) 2175 { 2176 const u32 data = gr->firmware ? 0x000e0000 : 0x000e0001; 2177 nvkm_wr32(gr->base.engine.subdev.device, 0x409c24, data); 2178 } 2179 2180 void 2181 gf100_gr_init_gpc_mmu(struct gf100_gr *gr) 2182 { 2183 struct nvkm_device *device = gr->base.engine.subdev.device; 2184 struct nvkm_fb *fb = device->fb; 2185 2186 nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001); 2187 nvkm_wr32(device, 0x4188a4, 0x03000000); 2188 nvkm_wr32(device, 0x418888, 0x00000000); 2189 nvkm_wr32(device, 0x41888c, 0x00000000); 2190 nvkm_wr32(device, 0x418890, 0x00000000); 2191 nvkm_wr32(device, 0x418894, 0x00000000); 2192 nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(fb->mmu_wr) >> 8); 2193 nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8); 2194 } 2195 2196 void 2197 gf100_gr_init_num_active_ltcs(struct gf100_gr *gr) 2198 { 2199 struct nvkm_device *device = gr->base.engine.subdev.device; 2200 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); 2201 } 2202 2203 void 2204 gf100_gr_init_zcull(struct gf100_gr *gr) 2205 { 2206 struct nvkm_device *device = gr->base.engine.subdev.device; 2207 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); 2208 const u8 tile_nr = ALIGN(gr->tpc_total, 32); 2209 u8 bank[GPC_MAX] = {}, gpc, i, j; 2210 u32 data; 2211 2212 for (i = 0; i < tile_nr; i += 8) { 2213 for (data = 0, j = 0; j < 8 && i + j < gr->tpc_total; j++) { 2214 data |= bank[gr->tile[i + j]] << (j * 4); 2215 bank[gr->tile[i + j]]++; 2216 } 2217 nvkm_wr32(device, GPC_BCAST(0x0980 + ((i / 8) * 4)), data); 2218 } 2219 2220 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 2221 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), 2222 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]); 2223 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | 2224 gr->tpc_total); 2225 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); 2226 } 2227 2228 nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918); 2229 } 2230 2231 void 2232 gf100_gr_init_vsc_stream_master(struct gf100_gr *gr) 2233 { 2234 struct nvkm_device *device = gr->base.engine.subdev.device; 2235 nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001); 2236 } 2237 2238 int 2239 gf100_gr_init(struct gf100_gr *gr) 2240 { 2241 struct nvkm_device *device = gr->base.engine.subdev.device; 2242 int gpc, tpc, rop; 2243 2244 if (gr->func->init_419bd8) 2245 gr->func->init_419bd8(gr); 2246 2247 gr->func->init_gpc_mmu(gr); 2248 2249 if (gr->sw_nonctx) 2250 gf100_gr_mmio(gr, gr->sw_nonctx); 2251 else 2252 gf100_gr_mmio(gr, gr->func->mmio); 2253 2254 gf100_gr_wait_idle(gr); 2255 2256 if (gr->func->init_r405a14) 2257 gr->func->init_r405a14(gr); 2258 2259 if (gr->func->clkgate_pack) 2260 nvkm_therm_clkgate_init(device->therm, gr->func->clkgate_pack); 2261 2262 if (gr->func->init_bios) 2263 gr->func->init_bios(gr); 2264 2265 gr->func->init_vsc_stream_master(gr); 2266 gr->func->init_zcull(gr); 2267 gr->func->init_num_active_ltcs(gr); 2268 if (gr->func->init_rop_active_fbps) 2269 gr->func->init_rop_active_fbps(gr); 2270 if (gr->func->init_bios_2) 2271 gr->func->init_bios_2(gr); 2272 if (gr->func->init_swdx_pes_mask) 2273 gr->func->init_swdx_pes_mask(gr); 2274 if (gr->func->init_fs) 2275 gr->func->init_fs(gr); 2276 2277 nvkm_wr32(device, 0x400500, 0x00010001); 2278 2279 nvkm_wr32(device, 0x400100, 0xffffffff); 2280 nvkm_wr32(device, 0x40013c, 0xffffffff); 2281 nvkm_wr32(device, 0x400124, 0x00000002); 2282 2283 gr->func->init_fecs_exceptions(gr); 2284 if (gr->func->init_ds_hww_esr_2) 2285 gr->func->init_ds_hww_esr_2(gr); 2286 2287 nvkm_wr32(device, 0x404000, 0xc0000000); 2288 nvkm_wr32(device, 0x404600, 0xc0000000); 2289 nvkm_wr32(device, 0x408030, 0xc0000000); 2290 2291 if (gr->func->init_40601c) 2292 gr->func->init_40601c(gr); 2293 2294 nvkm_wr32(device, 0x406018, 0xc0000000); 2295 nvkm_wr32(device, 0x404490, 0xc0000000); 2296 2297 if (gr->func->init_sked_hww_esr) 2298 gr->func->init_sked_hww_esr(gr); 2299 2300 nvkm_wr32(device, 0x405840, 0xc0000000); 2301 nvkm_wr32(device, 0x405844, 0x00ffffff); 2302 2303 if (gr->func->init_419cc0) 2304 gr->func->init_419cc0(gr); 2305 if (gr->func->init_419eb4) 2306 gr->func->init_419eb4(gr); 2307 if (gr->func->init_419c9c) 2308 gr->func->init_419c9c(gr); 2309 2310 if (gr->func->init_ppc_exceptions) 2311 gr->func->init_ppc_exceptions(gr); 2312 2313 for (gpc = 0; gpc < gr->gpc_nr; gpc++) { 2314 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); 2315 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); 2316 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); 2317 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); 2318 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { 2319 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); 2320 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); 2321 if (gr->func->init_tex_hww_esr) 2322 gr->func->init_tex_hww_esr(gr, gpc, tpc); 2323 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); 2324 if (gr->func->init_504430) 2325 gr->func->init_504430(gr, gpc, tpc); 2326 gr->func->init_shader_exceptions(gr, gpc, tpc); 2327 } 2328 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 2329 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 2330 } 2331 2332 for (rop = 0; rop < gr->rop_nr; rop++) { 2333 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000); 2334 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000); 2335 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); 2336 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); 2337 } 2338 2339 nvkm_wr32(device, 0x400108, 0xffffffff); 2340 nvkm_wr32(device, 0x400138, 0xffffffff); 2341 nvkm_wr32(device, 0x400118, 0xffffffff); 2342 nvkm_wr32(device, 0x400130, 0xffffffff); 2343 nvkm_wr32(device, 0x40011c, 0xffffffff); 2344 nvkm_wr32(device, 0x400134, 0xffffffff); 2345 2346 if (gr->func->init_400054) 2347 gr->func->init_400054(gr); 2348 2349 gf100_gr_zbc_init(gr); 2350 2351 if (gr->func->init_4188a4) 2352 gr->func->init_4188a4(gr); 2353 2354 return gf100_gr_init_ctxctl(gr); 2355 } 2356 2357 #include "fuc/hubgf100.fuc3.h" 2358 2359 struct gf100_gr_ucode 2360 gf100_gr_fecs_ucode = { 2361 .code.data = gf100_grhub_code, 2362 .code.size = sizeof(gf100_grhub_code), 2363 .data.data = gf100_grhub_data, 2364 .data.size = sizeof(gf100_grhub_data), 2365 }; 2366 2367 #include "fuc/gpcgf100.fuc3.h" 2368 2369 struct gf100_gr_ucode 2370 gf100_gr_gpccs_ucode = { 2371 .code.data = gf100_grgpc_code, 2372 .code.size = sizeof(gf100_grgpc_code), 2373 .data.data = gf100_grgpc_data, 2374 .data.size = sizeof(gf100_grgpc_data), 2375 }; 2376 2377 static const struct gf100_gr_func 2378 gf100_gr = { 2379 .oneinit_tiles = gf100_gr_oneinit_tiles, 2380 .oneinit_sm_id = gf100_gr_oneinit_sm_id, 2381 .init = gf100_gr_init, 2382 .init_gpc_mmu = gf100_gr_init_gpc_mmu, 2383 .init_vsc_stream_master = gf100_gr_init_vsc_stream_master, 2384 .init_zcull = gf100_gr_init_zcull, 2385 .init_num_active_ltcs = gf100_gr_init_num_active_ltcs, 2386 .init_fecs_exceptions = gf100_gr_init_fecs_exceptions, 2387 .init_40601c = gf100_gr_init_40601c, 2388 .init_419cc0 = gf100_gr_init_419cc0, 2389 .init_419eb4 = gf100_gr_init_419eb4, 2390 .init_tex_hww_esr = gf100_gr_init_tex_hww_esr, 2391 .init_shader_exceptions = gf100_gr_init_shader_exceptions, 2392 .init_400054 = gf100_gr_init_400054, 2393 .trap_mp = gf100_gr_trap_mp, 2394 .mmio = gf100_gr_pack_mmio, 2395 .fecs.ucode = &gf100_gr_fecs_ucode, 2396 .gpccs.ucode = &gf100_gr_gpccs_ucode, 2397 .rops = gf100_gr_rops, 2398 .grctx = &gf100_grctx, 2399 .zbc = &gf100_gr_zbc, 2400 .sclass = { 2401 { -1, -1, FERMI_TWOD_A }, 2402 { -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A }, 2403 { -1, -1, FERMI_A, &gf100_fermi }, 2404 { -1, -1, FERMI_COMPUTE_A }, 2405 {} 2406 } 2407 }; 2408 2409 int 2410 gf100_gr_nofw(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) 2411 { 2412 gr->firmware = false; 2413 return 0; 2414 } 2415 2416 static int 2417 gf100_gr_load_fw(struct gf100_gr *gr, const char *name, 2418 struct nvkm_blob *blob) 2419 { 2420 struct nvkm_subdev *subdev = &gr->base.engine.subdev; 2421 struct nvkm_device *device = subdev->device; 2422 const struct firmware *fw; 2423 char f[32]; 2424 int ret; 2425 2426 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, name); 2427 ret = request_firmware(&fw, f, device->dev); 2428 if (ret) { 2429 snprintf(f, sizeof(f), "nouveau/%s", name); 2430 ret = request_firmware(&fw, f, device->dev); 2431 if (ret) { 2432 nvkm_error(subdev, "failed to load %s\n", name); 2433 return ret; 2434 } 2435 } 2436 2437 blob->size = fw->size; 2438 blob->data = kmemdup(fw->data, blob->size, GFP_KERNEL); 2439 release_firmware(fw); 2440 return (blob->data != NULL) ? 0 : -ENOMEM; 2441 } 2442 2443 int 2444 gf100_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif) 2445 { 2446 struct nvkm_device *device = gr->base.engine.subdev.device; 2447 2448 if (!nvkm_boolopt(device->cfgopt, "NvGrUseFW", false)) 2449 return -EINVAL; 2450 2451 if (gf100_gr_load_fw(gr, "fuc409c", &gr->fecs.inst) || 2452 gf100_gr_load_fw(gr, "fuc409d", &gr->fecs.data) || 2453 gf100_gr_load_fw(gr, "fuc41ac", &gr->gpccs.inst) || 2454 gf100_gr_load_fw(gr, "fuc41ad", &gr->gpccs.data)) 2455 return -ENOENT; 2456 2457 gr->firmware = true; 2458 return 0; 2459 } 2460 2461 static const struct gf100_gr_fwif 2462 gf100_gr_fwif[] = { 2463 { -1, gf100_gr_load, &gf100_gr }, 2464 { -1, gf100_gr_nofw, &gf100_gr }, 2465 {} 2466 }; 2467 2468 int 2469 gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) 2470 { 2471 return gf100_gr_new_(gf100_gr_fwif, device, index, pgr); 2472 } 2473