/*	$NetBSD: nouveau_nvkm_subdev_clk_gk104.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_gk104.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $");

#define gk104_clk(p) container_of((p), struct gk104_clk, base)
#include "priv.h"
#include "pll.h"

#include <subdev/timer.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>

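/*
 * Per-clock programming state, filled in by gk104_clk_calc() and
 * applied by gk104_clk_prog().  Field meanings below are inferred from
 * how the driver uses them (the hardware itself is undocumented):
 *
 *	freq	frequency (kHz) the chosen configuration will produce
 *	ssel	(1 << idx) when the clock should run from its PLL
 *	mdiv	post-divider for 0x137250 + (idx * 4)
 *	dsrc	source select for 0x137160 + (idx * 4)
 *	ddiv	bypass-path divider for 0x1371d0 + (idx * 4)
 *	coef	PLL N/M/P coefficients for 0x137004 + (idx * 0x20)
 */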
struct gk104_clk_info {
	u32 freq;
	u32 ssel;
	u32 mdiv;
	u32 dsrc;
	u32 ddiv;
	u32 coef;
};

struct gk104_clk {
	struct nvkm_clk base;
	struct gk104_clk_info eng[16];
};

static u32 read_div(struct gk104_clk *, int, u32, u32);
static u32 read_pll(struct gk104_clk *, u32);

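/*
 * Read back the VCO feeding a divider source.  Bit 8 of the source
 * register appears to choose between the reference PLLs at 0xe800 and
 * 0xe820 (nouveau's reverse-engineered interpretation).
 */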
static u32
read_vco(struct gk104_clk *clk, u32 dsrc)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc);
	if (!(ssrc & 0x00000100))
		return read_pll(clk, 0x00e800);
	return read_pll(clk, 0x00e820);
}

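/*
 * Read back a PLL's output frequency (kHz).  Coefficients use the
 * usual NVIDIA layout (P in bits 21:16, N in 15:8, M in 7:0), and the
 * PLL at 0x132020 additionally carries a fractional-N term fN, so the
 * computation below amounts to:
 *
 *	out = refclk * (N + (fN + 4096) / 8192) / (M * P)
 *
 * The default fN of 0xf000 makes the fractional term vanish, since
 * (u16)(0xf000 + 4096) == 0.
 */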
static u32
read_pll(struct gk104_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;
	u32 N = (coef & 0x0000ff00) >> 8;
	u32 M = (coef & 0x000000ff) >> 0;
	u32 sclk;
	u16 fN = 0xf000;

	if (!(ctrl & 0x00000001))
		return 0;

	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		sclk = read_pll(clk, 0x132020);
		P = (coef & 0x10000000) ? 2 : 1;
		break;
	case 0x132020:
		sclk = read_div(clk, 0, 0x137320, 0x137330);
		fN   = nvkm_rd32(device, pll + 0x10) >> 16;
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	if (P == 0)
		P = 1;

	sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
	return sclk / (M * P);
}

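/*
 * Read back one of the per-clock divider sources.  The low two bits of
 * the source register select the crystal or a fixed 108MHz (0), the
 * 100MHz host reference (2), or the VCO path (3); with bit 31 of the
 * control register set, the VCO is further divided by
 * ((sctl & 0x3f) + 2) / 2.  This follows nouveau's reverse-engineered
 * understanding of the registers.
 */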
static u32
read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;
	case 3:
		if (sctl & 0x80000000) {
			u32 sclk = read_vco(clk, dsrc + (doff * 4));
			u32 sdiv = (sctl & 0x0000003f) + 2;
			return (sclk * 2) / sdiv;
		}

		return read_vco(clk, dsrc + (doff * 4));
	default:
		return 0;
	}
}

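/*
 * Memory clock readback: the low bits of 0x1373f4 appear to select
 * which of the two memory-related PLLs currently drives VRAM.
 */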
static u32
read_mem(struct gk104_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
	case 1: return read_pll(clk, 0x132020);
	case 2: return read_pll(clk, 0x132000);
	default:
		return 0;
	}
}

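/*
 * Read back engine clock 'idx'.  Clocks 0-6 have a PLL-select bit in
 * 0x137100; clocks 7 and up instead encode their mode in the source
 * register itself.  In either case a final half-step divider from
 * 0x137250 + (idx * 4) may apply.
 */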
static u32
read_clk(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 sclk, sdiv;

	if (idx < 7) {
		u32 ssel = nvkm_rd32(device, 0x137100);
		if (ssel & (1 << idx)) {
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
			sdiv = 1;
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;
		}
	} else {
		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
		if ((ssrc & 0x00000003) == 0x00000003) {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			if (ssrc & 0x00000100) {
				if (ssrc & 0x40000000)
					sclk = read_pll(clk, 0x1370e0);
				sdiv = 1;
			} else {
				sdiv = 0;
			}
		} else {
			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
			sdiv = 0;
		}
	}

	if (sctl & 0x80000000) {
		if (sdiv)
			sdiv = ((sctl & 0x00003f00) >> 8) + 2;
		else
			sdiv = ((sctl & 0x0000003f) >> 0) + 2;
		return (sclk * 2) / sdiv;
	}

	return sclk;
}

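/*
 * nvkm_clk .read method: map the abstract clock domains onto the
 * readback helpers above.  The indices match the mux layout used
 * throughout this file (gpc=0, rop=1, hubk07=2, hubk06=7, hubk01=8,
 * pmu=12, vdec=14).
 */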
static int
gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_mem:
		return read_mem(clk);
	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

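/*
 * Pick the divider closest to 'freq' from reference 'ref'.  Dividers
 * work in half steps: the register field encodes (div - 2) with
 * out = ref * 2 / div, div clamped to [2, 65].
 */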
static u32
calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = min((ref * 2) / freq, (u32)65);
	if (div < 2)
		div = 2;

	*ddiv = div - 2;
	return (ref * 2) / div;
}

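/*
 * Choose a source for 'freq': one of the fixed rates (crystal, 108MHz,
 * or the 100MHz host reference) when it matches exactly, otherwise the
 * VCO with the nearest bypass divider.  Clocks 7 and up have no such
 * divider and take the VCO output directly here.
 */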
static u32
calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(clk, 0x137160 + (idx * 4));
	if (idx < 7)
		sclk = calc_div(clk, idx, sclk, freq, ddiv);
	return sclk;
}

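/*
 * Compute PLL coefficients for 'freq' within the limits given by the
 * VBIOS PLL table, using the currently programmed reference clock.
 * Returns the achievable frequency in kHz (0 on failure), with the
 * coefficients packed in the layout read_pll() expects.
 */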
static u32
calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}

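/*
 * Decide how to program clock 'idx' for the requested cstate: try a
 * divider-only path first, then, for clocks that have one, a PLL path,
 * and keep whichever lands closer to the target.  Clocks above 7 have
 * no PLL of their own and instead divide down the hubk06 clock.
 */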
static int
calc_clk(struct gk104_clk *clk,
	 struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << idx);
		info->dsrc = 0x40000100;
		info->freq = clk1;
	}

	return 0;
}

static int
gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk104_clk *clk = gk104_clk(base);
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
		return ret;

	return 0;
}

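/*
 * Programming runs in the stages listed in gk104_clk_prog() below:
 * park each clock on the bypass divider path, (re)program the PLL and
 * wait for lock, set the final divider, then switch back onto the PLL.
 * Clocks 0-6 and 7-15 are steered through different registers, hence
 * the _0/_1 variants of some stages.
 */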
static void
gk104_clk_prog_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (!info->ssel) {
		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
	}
}

static void
gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}

static void
gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
}

static void
gk104_clk_prog_2(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);
	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
	if (info->coef) {
		nvkm_wr32(device, addr + 0x04, info->coef);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

		/* Test PLL lock */
		nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
				break;
		);
		nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

		/* Enable sync mode */
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
	}
}

static void
gk104_clk_prog_3(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel)
		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
	else
		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
}

static void
gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}

static void
gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
{
	struct gk104_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
	}
}

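/*
 * Apply the state computed by gk104_clk_calc(): run each stage across
 * all sixteen clocks before moving on to the next, skipping clocks the
 * stage's mask doesn't cover or that have no pending change.
 */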
static int
gk104_clk_prog(struct nvkm_clk *base)
{
	struct gk104_clk *clk = gk104_clk(base);
	struct {
		u32 mask;
		void (*exec)(struct gk104_clk *, int);
	} stage[] = {
		{ 0x007f, gk104_clk_prog_0   }, /* div programming */
		{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
		{ 0xff80, gk104_clk_prog_1_1 },
		{ 0x00ff, gk104_clk_prog_2   }, /* (maybe) program pll */
		{ 0xff80, gk104_clk_prog_3   }, /* final divider */
		{ 0x007f, gk104_clk_prog_4_0 }, /* (maybe) select pll mode */
		{ 0xff80, gk104_clk_prog_4_1 },
	};
	int i, j;

	for (i = 0; i < ARRAY_SIZE(stage); i++) {
		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
			if (!(stage[i].mask & (1 << j)))
				continue;
			if (!clk->eng[j].freq)
				continue;
			stage[i].exec(clk, j);
		}
	}

	return 0;
}

static void
gk104_clk_tidy(struct nvkm_clk *base)
{
	struct gk104_clk *clk = gk104_clk(base);
	memset(clk->eng, 0x00, sizeof(clk->eng));
}

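/*
 * Domain table: the second field is each domain's index in the VBIOS
 * performance tables; 0xff marks domains (crystal, href) that are
 * fixed and never reclocked.
 */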
static const struct nvkm_clk_func
gk104_clk = {
	.read = gk104_clk_read,
	.calc = gk104_clk_calc,
	.prog = gk104_clk_prog,
	.tidy = gk104_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE | NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
		{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
		{ nv_clk_src_hubk01 , 0x05 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x07 },
		{ nv_clk_src_max }
	}
};

int
gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gk104_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
}