1 /* $NetBSD: nouveau_nvkm_subdev_fb_ramnv50.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $ */ 2 3 /* 4 * Copyright 2013 Red Hat Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_fb_ramnv50.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $");

/* Downcast from the generic nvkm_ram embedded as nv50_ram::base. */
#define nv50_ram(p) container_of((p), struct nv50_ram, base)
#include "ram.h"
#include "ramseq.h"
#include "nv50.h"

#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
#include <subdev/clk/pll.h>
#include <subdev/gpio.h>

/*
 * Register handles used by the hwsq (hardware sequencer) script that
 * nv50_ram_calc() builds.  Each r_* entry shadows one MMIO register (or a
 * per-partition stride / per-rank pair); the handles are initialised once
 * in nv50_ram_new().
 */
struct nv50_ramseq {
	struct hwsq base;
	struct hwsq_reg r_0x002504;
	struct hwsq_reg r_0x004008;
	struct hwsq_reg r_0x00400c;
	struct hwsq_reg r_0x00c040;
	struct hwsq_reg r_0x100200;
	struct hwsq_reg r_0x100210;
	struct hwsq_reg r_0x10021c;
	struct hwsq_reg r_0x1002d0;
	struct hwsq_reg r_0x1002d4;
	struct hwsq_reg r_0x1002dc;
	struct hwsq_reg r_0x10053c;
	struct hwsq_reg r_0x1005a0;
	struct hwsq_reg r_0x1005a4;
	struct hwsq_reg r_0x100710;
	struct hwsq_reg r_0x100714;
	struct hwsq_reg r_0x100718;
	struct hwsq_reg r_0x10071c;
	struct hwsq_reg r_0x100da0;
	struct hwsq_reg r_0x100e20;
	struct hwsq_reg r_0x100e24;
	struct hwsq_reg r_0x611200;
	struct hwsq_reg r_timing[9];	/* 0x100220..0x100240 timing set */
	struct hwsq_reg r_mr[4];	/* DRAM mode registers */
	struct hwsq_reg r_gpio[4];	/* 0x00e104/8, 0x00e120/4 */
};

struct nv50_ram {
	struct nvkm_ram base;
	struct nv50_ramseq hwsq;
};

/* Shorthand for the BIOS timing-1.0 fields of the current target config. */
#define T(t) cfg->timing_10_##t

/*
 * Compute the nine timing register values (0x100220..0x100240) for the
 * target performance level from the VBIOS timing-1.0 parameters in
 * ram->base.target.bios.  Some fields of the current register contents
 * (cur4/cur7/cur8) are preserved via masking.  Always returns 0.
 */
static int
nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
{
	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_device *device = subdev->device;
	u32 cur2, cur4, cur7, cur8;
	u8 unkt3b;

	cur2 = nvkm_rd32(device, 0x100228);
	cur4 = nvkm_rd32(device, 0x100230);
	cur7 = nvkm_rd32(device, 0x10023c);
	cur8 = nvkm_rd32(device, 0x100240);

	/* If CWL is unset (zero), derive it per ram type; the (!T(CWL))
	 * multiplication makes the switch match nothing when CWL is
	 * already provided by the VBIOS. */
	switch ((!T(CWL)) * ram->base.type) {
	case NVKM_RAM_TYPE_DDR2:
		T(CWL) = T(CL) - 1;
		break;
	case NVKM_RAM_TYPE_GDDR3:
		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
		break;
	}

	/* XXX: N=1 is not proper statistics */
	if (device->chipset == 0xa0) {
		unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
		timing[6] = (0x2d + T(CL) - T(CWL) +
				ram->base.next->bios.rammap_00_16_40) << 16 |
			    T(CWL) << 8 |
			    (0x2f + T(CL) - T(CWL));
	} else {
		unkt3b = 0x16;
		timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
			    max_t(s8, T(CWL) - 2, 1) << 8 |
			    (0x2e + T(CL) - T(CWL));
	}

	timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
	timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
		    max_t(u8, T(18), 1) << 16 |
		    (T(WTR) + 1 + T(CWL)) << 8 |
		    (3 + T(CL) - T(CWL));
	timing[2] = (T(CWL) - 1) << 24 |
		    (T(RRD) << 16) |
		    (T(RCDWR) << 8) |
		    T(RCDRD);
	timing[3] = (unkt3b - 2 + T(CL)) << 24 |
		    unkt3b << 16 |
		    (T(CL) - 1) << 8 |
		    (T(CL) - 1);
	timing[4] = (cur4 & 0xffff0000) |
		    T(13) << 8 |
		    T(13);
	timing[5] = T(RFC) << 24 |
		    max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
		    T(RP);
	/* Timing 6 is already done above */
	timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
	timing[8] = (cur8 & 0xffffff00);

	/* XXX: P.version == 1 only has DDR2 and GDDR3? */
	if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
		timing[5] |= (T(CL) + 3) << 8;
		timing[8] |= (T(CL) - 4);
	} else
	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
		timing[5] |= (T(CL) + 2) << 8;
		timing[8] |= (T(CL) - 2);
	}

	nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
		   timing[0], timing[1], timing[2], timing[3]);
	nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
		   timing[4], timing[5], timing[6], timing[7]);
	nvkm_debug(subdev, " 240: %08x\n", timing[8]);
	return 0;
}

/*
 * Fallback path when the VBIOS has no timing entry: snapshot the current
 * timing registers into timing[] and back-derive the minimal timing-1.0
 * fields (CL, CWL, WR) that the MR calculation needs.
 *
 * Returns 0 on success, -ENOSYS for ram types this path cannot handle.
 */
static int
nv50_ram_timing_read(struct nv50_ram *ram, u32 *timing)
{
	unsigned int i;
	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_device *device = subdev->device;

	for (i = 0; i <= 8; i++)
		timing[i] = nvkm_rd32(device, 0x100220 + (i * 4));

	/* Derive the bare minimum for the MR calculation to succeed */
	cfg->timing_ver = 0x10;
	T(CL) = (timing[3] & 0xff) + 1;

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_DDR2:
		T(CWL) = T(CL) - 1;
		break;
	case NVKM_RAM_TYPE_GDDR3:
		T(CWL) = ((timing[2] & 0xff000000) >> 24) + 1;
		break;
	default:
		return -ENOSYS;
		break;
	}

	T(WR) = ((timing[1] >> 24) & 0xff) - 1 - T(CWL);

	return 0;
}
#undef T

/*
 * Queue a DLL reset into the hwsq script: pulse bit 8 of MR0 high then
 * low and wait 24us for the DLL to settle.
 */
static void
nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
{
	ram_mask(hwsq, mr[0], 0x100, 0x100);
	ram_mask(hwsq, mr[0], 0x100, 0x000);
	ram_nsec(hwsq, 24000);
}

/*
 * Queue hwsq commands driving the DCB GPIO identified by 'tag' to logical
 * state 'val', if it is not already there.  The value actually written is
 * corrected for the GPIO's current output-invert bit (bit 3 of its nibble)
 * and the DCB logical polarity (func.log[1] & 1); bit 1 of the nibble is
 * forced on via (val | 0x2).  A 20us settle delay follows the write.
 */
static void
nv50_ram_gpio(struct nv50_ramseq *hwsq, u8 tag, u32 val)
{
	struct nvkm_gpio *gpio = hwsq->base.subdev->device->gpio;
	struct dcb_gpio_func func;
	u32 reg, sh, gpio_val;
	int ret;

	if (nvkm_gpio_get(gpio, 0, tag, DCB_GPIO_UNUSED) != val) {
		ret = nvkm_gpio_find(gpio, 0, tag, DCB_GPIO_UNUSED, &func);
		if (ret)
			return;

		/* Eight GPIO lines per register, one nibble per line. */
		reg = func.line >> 3;
		sh = (func.line & 0x7) << 2;
		gpio_val = ram_rd32(hwsq, gpio[reg]);

		if (gpio_val & (8 << sh))
			val = !val;
		if (!(func.log[1] & 1))
			val = !val;

		ram_mask(hwsq, gpio[reg], (0x3 << sh), ((val | 0x2) << sh));
		ram_nsec(hwsq, 20000);
	}
}

/*
 * Build the hwsq reclocking script for the requested memory frequency
 * (kHz): look up the VBIOS perf/rammap/timing data, compute MR and MPLL
 * coefficients, then queue the full stop-fb / self-refresh / retune /
 * restart sequence.  The script is only executed later by nv50_ram_prog().
 *
 * Returns 0 on success or a negative errno (-EINVAL on bad VBIOS data or
 * PLL failure, -ENOSYS for unsupported ram types in the MR calculation).
 */
static int
nv50_ram_calc(struct nvkm_ram *base, u32 freq)
{
	struct nv50_ram *ram = nv50_ram(base);
	struct nv50_ramseq *hwsq = &ram->hwsq;
	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_perfE perfE;
	struct nvbios_pll mpll;
	struct nvkm_ram_data *next;
	u8 ver, hdr, cnt, len, strap, size;
	u32 data;
	u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
	int N1, M1, N2, M2, P;
	int ret, i;
	u32 timing[9];

	next = &ram->base.target;
	next->freq = freq;
	ram->base.next = next;

	/* lookup closest matching performance table entry for frequency */
	i = 0;
	do {
		data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
				     &size, &perfE);
		if (!data || (ver < 0x25 || ver >= 0x40) ||
		    (size < 2)) {
			nvkm_error(subdev, "invalid/missing perftab entry\n");
			return -EINVAL;
		}
	} while (perfE.memory < freq);

	nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);

	/* locate specific data set for the attached memory */
	strap = nvbios_ramcfg_index(subdev);
	if (strap >= cnt) {
		nvkm_error(subdev, "invalid ramcfg strap\n");
		return -EINVAL;
	}

	data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
					 &next->bios);
	if (!data) {
		nvkm_error(subdev, "invalid/missing rammap entry ");
		return -EINVAL;
	}

	/* lookup memory timings, if bios says they're present */
	if (next->bios.ramcfg_timing != 0xff) {
		data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
				       &ver, &hdr, &cnt, &len, &next->bios);
		if (!data || ver != 0x10 || hdr < 0x12) {
			nvkm_error(subdev, "invalid/missing timing entry "
				   "%02x %04x %02x %02x\n",
				   strap, data, ver, hdr);
			return -EINVAL;
		}
		nv50_ram_timing_calc(ram, timing);
	} else {
		/* No VBIOS timing entry: keep current hardware timings. */
		nv50_ram_timing_read(ram, timing);
	}

	ret = ram_init(hwsq, subdev);
	if (ret)
		return ret;

	/* Determine ram-specific MR values */
	ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
	ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
	ram->base.mr[2] = ram_rd32(hwsq, mr[2]);

	switch (ram->base.type) {
	case NVKM_RAM_TYPE_GDDR3:
		ret = nvkm_gddr3_calc(&ram->base);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		nvkm_error(subdev, "Could not calculate MR\n");
		return ret;
	}

	if (subdev->device->chipset <= 0x96 && !next->bios.ramcfg_00_03_02)
		ram_mask(hwsq, 0x100710, 0x00000200, 0x00000000);

	/* Always disable this bit during reclock */
	ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);

	/* Quiesce the chip: wait for vblank, block the fifo, stop fb. */
	ram_wait_vblank(hwsq);
	ram_wr32(hwsq, 0x611200, 0x00003300);
	ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
	ram_nsec(hwsq, 8000);
	ram_setf(hwsq, 0x10, 0x00); /* disable fb */
	ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
	ram_nsec(hwsq, 2000);

	if (next->bios.timing_10_ODT)
		nv50_ram_gpio(hwsq, 0x2e, 1);

	/* Put the DRAM into self-refresh while the MPLL is retuned. */
	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
	ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
	ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */

	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
	mpll.vco2.max_freq = 0;	/* single-stage VCO only */
	if (ret >= 0) {
		ret = nv04_pll_calc(subdev, &mpll, freq,
				    &N1, &M1, &N2, &M2, &P);
		if (ret <= 0)
			ret = -EINVAL;
	}

	if (ret < 0)
		return ret;

	/* XXX: 750MHz seems rather arbitrary */
	if (freq <= 750000) {
		r100da0 = 0x00000010;
		r004008 = 0x90000000;
	} else {
		r100da0 = 0x00000000;
		r004008 = 0x80000000;
	}

	r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);

	ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
	/* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
	 * it have a different rammap bit from DLLoff? */
	ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
			next->bios.rammap_00_16_40 << 14);
	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
	ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);

	/* XXX: GDDR3 only? */
	if (subdev->device->chipset >= 0x92)
		ram_wr32(hwsq, 0x100da0, r100da0);

	nv50_ram_gpio(hwsq, 0x18, !next->bios.ramcfg_FBVDDQ);
	ram_nsec(hwsq, 64000); /*XXX*/
	ram_nsec(hwsq, 32000); /*XXX*/

	ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);

	/* Bring the DRAM back out of self-refresh.
	 * NOTE(review): the 0x1002d4 write is the same precharge command as
	 * above; the "disable self-refresh" comment looks copy-pasted from
	 * the 0x1002dc line — confirm against register documentation. */
	ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* disable self-refresh */
	ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */

	ram_nsec(hwsq, 12000);

	/* Rewrite the mode registers for the new clock. */
	switch (ram->base.type) {
	case NVKM_RAM_TYPE_DDR2:
		ram_nuke(hwsq, mr[0]); /* force update */
		ram_mask(hwsq, mr[0], 0x000, 0x000);
		break;
	case NVKM_RAM_TYPE_GDDR3:
		ram_nuke(hwsq, mr[1]); /* force update */
		ram_wr32(hwsq, mr[1], ram->base.mr[1]);
		ram_nuke(hwsq, mr[0]); /* force update */
		ram_wr32(hwsq, mr[0], ram->base.mr[0]);
		break;
	default:
		break;
	}

	/* Program the new timing set (order follows mmio traces). */
	ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
	ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
	ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
	ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
	ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
	ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
	ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
	ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
	ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);

	if (!next->bios.ramcfg_00_03_02)
		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
	ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);

	/* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
	unk710 = ram_rd32(hwsq, 0x100710) & ~0x00000100;
	unk714 = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
	unk718 = ram_rd32(hwsq, 0x100718) & ~0x00000100;
	unk71c = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
	if (subdev->device->chipset <= 0x96) {
		unk710 &= ~0x0000006e;
		unk714 &= ~0x00000100;

		if (!next->bios.ramcfg_00_03_08)
			unk710 |= 0x00000060;
		if (!next->bios.ramcfg_FBVDDQ)
			unk714 |= 0x00000100;
		if ( next->bios.ramcfg_00_04_04)
			unk710 |= 0x0000000e;
	} else {
		unk710 &= ~0x00000001;

		if (!next->bios.ramcfg_00_03_08)
			unk710 |= 0x00000001;
	}

	if ( next->bios.ramcfg_00_03_01)
		unk71c |= 0x00000100;
	if ( next->bios.ramcfg_00_03_02)
		unk710 |= 0x00000100;
	if (!next->bios.ramcfg_00_03_08)
		unk714 |= 0x00000020;
	if ( next->bios.ramcfg_00_04_04)
		unk714 |= 0x70000000;
	if ( next->bios.ramcfg_00_04_20)
		unk718 |= 0x00000100;

	ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
	ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
	ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
	ram_mask(hwsq, 0x100710, 0xffffffff, unk710);

	/* XXX: G94 does not even test these regs in trace. Harmless we do it,
	 * but why is it omitted? */
	if (next->bios.rammap_00_16_20) {
		ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
					 next->bios.ramcfg_00_06 << 8 |
					 next->bios.ramcfg_00_05);
		ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
					 next->bios.ramcfg_00_08);
		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
	} else {
		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
	}
	ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);

	if (!next->bios.timing_10_ODT)
		nv50_ram_gpio(hwsq, 0x2e, 0);

	/* Reset DLL */
	if (!next->bios.ramcfg_DLLoff)
		nvkm_sddr2_dll_reset(hwsq);

	/* Restart fb and unblock the fifo. */
	ram_setf(hwsq, 0x10, 0x01); /* enable fb */
	ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
	ram_wr32(hwsq, 0x611200, 0x00003330);
	ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */

	if (next->bios.rammap_00_17_02)
		ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
	if (!next->bios.rammap_00_16_40)
		ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
	if (next->bios.ramcfg_00_03_02)
		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
	if (subdev->device->chipset <= 0x96 && next->bios.ramcfg_00_03_02)
		ram_mask(hwsq, 0x100710, 0x00000200, 0x00000200);

	return 0;
}

/*
 * Execute the hwsq script built by nv50_ram_calc().  The "NvMemExec"
 * config option (default true) allows a dry run without touching hardware.
 */
static int
nv50_ram_prog(struct nvkm_ram *base)
{
	struct nv50_ram *ram = nv50_ram(base);
	struct nvkm_device *device = ram->base.fb->subdev.device;
	ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
	return 0;
}

/* Discard a built hwsq script without executing it. */
static void
nv50_ram_tidy(struct nvkm_ram *base)
{
	struct nv50_ram *ram = nv50_ram(base);
	ram_exec(&ram->hwsq, false);
}

static const struct nvkm_ram_func
nv50_ram_func = {
	.calc = nv50_ram_calc,
	.prog = nv50_ram_prog,
	.tidy = nv50_ram_tidy,
};

/*
 * Derive the VRAM row-block size from the memory configuration registers:
 * bytes per row across all partitions/banks, times 3 when 0x100250 bit 0
 * indicates a non-power-of-two layout.  Also sanity-checks the predicted
 * total against the reported VRAM size, warning on mismatch.
 */
static u32
nv50_fb_vram_rblock(struct nvkm_ram *ram)
{
	struct nvkm_subdev *subdev = &ram->fb->subdev;
	struct nvkm_device *device = subdev->device;
	int colbits, rowbitsa, rowbitsb, banks;
	u64 rowsize, predicted;
	u32 r0, r4, rt, rblock_size;

	r0 = nvkm_rd32(device, 0x100200);
	r4 = nvkm_rd32(device, 0x100204);
	rt = nvkm_rd32(device, 0x100250);
	nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
		   r0, r4, rt, nvkm_rd32(device, 0x001540));

	colbits = (r4 & 0x0000f000) >> 12;
	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	banks = 1 << (((r4 & 0x03000000) >> 24) + 2);

	rowsize = ram->parts * banks * (1 << colbits) * 8;
	predicted = rowsize << rowbitsa;
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	if (predicted != ram->size) {
		nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
			  (u32)(ram->size >> 20));
	}

	rblock_size = rowsize;
	if (rt & 1)
		rblock_size *= 3;

	nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
	return rblock_size;
}

/*
 * Common constructor for NV50-family ram objects: probe the ram type from
 * 0x100714, the size from 0x10020c, the partition mask and rank count,
 * then set up the VRAM allocator leaving 256KiB at the bottom (vga) and
 * 1MiB at the top (vbios etc) reserved.
 *
 * Returns 0 on success or a negative errno.
 */
int
nv50_ram_ctor(const struct nvkm_ram_func *func,
	      struct nvkm_fb *fb, struct nvkm_ram *ram)
{
	struct nvkm_device *device = fb->subdev.device;
	struct nvkm_bios *bios = device->bios;
	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
	u64 size = nvkm_rd32(device, 0x10020c);
	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
	int ret;

	switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
	case 0: type = NVKM_RAM_TYPE_DDR1; break;
	case 1:
		/* Strap value 1 is ambiguous; let the VBIOS decide. */
		if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
			type = NVKM_RAM_TYPE_DDR3;
		else
			type = NVKM_RAM_TYPE_DDR2;
		break;
	case 2: type = NVKM_RAM_TYPE_GDDR3; break;
	case 3: type = NVKM_RAM_TYPE_GDDR4; break;
	case 4: type = NVKM_RAM_TYPE_GDDR5; break;
	default:
		break;
	}

	/* Low byte of 0x10020c holds bits 32..39 of the size. */
	size = (size & 0x000000ff) << 32 | (size & 0xffffff00);

	ret = nvkm_ram_ctor(func, fb, type, size, ram);
	if (ret)
		return ret;

	ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
	ram->parts = hweight8(ram->part_mask);
	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
	nvkm_mm_fini(&ram->vram);

	return nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
			    rsvd_head >> NVKM_RAM_MM_SHIFT,
			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
}

/*
 * Allocate and initialise the NV50 ram object, including all hwsq
 * register handles used by the reclocking script.  Mode registers get
 * per-rank register pairs when two ranks are present.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * nv50_ram_ctor().
 */
int
nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
	struct nv50_ram *ram;
	int ret, i;

	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
		return -ENOMEM;
	*pram = &ram->base;

	ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
	if (ret)
		return ret;

	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
	ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
	ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
	ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
	ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
	ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
	ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
	ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
	ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
	ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
	/* 0x100da0 is replicated per memory partition. */
	ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);

	for (i = 0; i < 9; i++)
		ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));

	if (ram->base.ranks > 1) {
		ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
		ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
		ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
		ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
	} else {
		ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
		ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
		ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
		ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
	}

	ram->hwsq.r_gpio[0] = hwsq_reg(0x00e104);
	ram->hwsq.r_gpio[1] = hwsq_reg(0x00e108);
	ram->hwsq.r_gpio[2] = hwsq_reg(0x00e120);
	ram->hwsq.r_gpio[3] = hwsq_reg(0x00e124);

	return 0;
}