/* Home | History | Annotate | Line # | Download | only in clk (code-browser navigation residue, kept as a comment) */
      1 /*	$NetBSD: nouveau_nvkm_subdev_clk_gf100.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2012 Red Hat Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  * Authors: Ben Skeggs
     25  */
     26 #include <sys/cdefs.h>
     27 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_gf100.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $");
     28 
     29 #define gf100_clk(p) container_of((p), struct gf100_clk, base)
     30 #include "priv.h"
     31 #include "pll.h"
     32 
     33 #include <subdev/bios.h>
     34 #include <subdev/bios/pll.h>
     35 #include <subdev/timer.h>
     36 
/*
 * Per-engine clock programming state, computed by calc_clk() and
 * consumed by the gf100_clk_prog_* stages below.
 */
struct gf100_clk_info {
	u32 freq;	/* target frequency; 0 means "leave this engine alone" */
	u32 ssel;	/* bit for 0x137100 selecting PLL (vs divider) mode */
	u32 mdiv;	/* value for the 0x137250 post-divider register */
	u32 dsrc;	/* value for the 0x137160 clock-source register */
	u32 ddiv;	/* value for the 0x1371d0 pre-divider register */
	u32 coef;	/* PLL M/N/P coefficients; 0 if the divider path won */
};
     45 
/*
 * GF100 clock subdev: the common nvkm_clk base object plus a staging
 * area for the per-engine settings computed during a state change.
 */
struct gf100_clk {
	struct nvkm_clk base;
	struct gf100_clk_info eng[16];	/* indexed by engine clock index */
};
     50 
     51 static u32 read_div(struct gf100_clk *, int, u32, u32);
     52 
     53 static u32
     54 read_vco(struct gf100_clk *clk, u32 dsrc)
     55 {
     56 	struct nvkm_device *device = clk->base.subdev.device;
     57 	u32 ssrc = nvkm_rd32(device, dsrc);
     58 	if (!(ssrc & 0x00000100))
     59 		return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
     60 	return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
     61 }
     62 
/*
 * read_pll - read back the current output frequency of a PLL.
 *
 * "pll" is the base register offset of the PLL: +0x00 is the control
 * register (bit 0 = enable) and +0x04 holds the M/N/P coefficients.
 * Returns 0 if the PLL is disabled or the offset is not one this code
 * knows about.
 */
static u32
read_pll(struct gf100_clk *clk, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0x00);
	u32 coef = nvkm_rd32(device, pll + 0x04);
	u32 P = (coef & 0x003f0000) >> 16;	/* post-divider */
	u32 N = (coef & 0x0000ff00) >> 8;	/* feedback multiplier */
	u32 M = (coef & 0x000000ff) >> 0;	/* input divider */
	u32 sclk;

	if (!(ctrl & 0x00000001))
		return 0;

	/* pick the reference clock according to which PLL this is */
	switch (pll) {
	case 0x00e800:
	case 0x00e820:
		/* sppll0/1 run straight from the crystal; P unused here */
		sclk = device->crystal;
		P = 1;
		break;
	case 0x132000:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
		break;
	case 0x132020:
		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
		break;
	case 0x137000:
	case 0x137020:
	case 0x137040:
	case 0x1370e0:
		/* engine PLLs: reference comes from a per-PLL ref divider */
		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
		break;
	default:
		return 0;
	}

	return sclk * N / M / P;
}
    101 
/*
 * read_div - read back the frequency produced by one of the
 * source/divider register pairs at dsrc/dctl + (doff * 4).
 *
 * The low two bits of the source register select the input:
 *   0 = crystal (or a fixed 108MHz reference when bits 16-17 are set),
 *   2 = 100MHz reference,
 *   3 = VCO, optionally divided down via the dctl register.
 * Source value 1 is not handled and falls through to return 0.
 */
static u32
read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
	u32 sclk, sctl, sdiv = 2;

	switch (ssrc & 0x00000003) {
	case 0:
		if ((ssrc & 0x00030000) != 0x00030000)
			return device->crystal;
		return 108000;
	case 2:
		return 100000;
	case 3:
		sclk = read_vco(clk, dsrc + (doff * 4));

		/* Memclk has doff of 0 despite its alt. location */
		if (doff <= 2) {
			sctl = nvkm_rd32(device, dctl + (doff * 4));

			/* bit 31 = divider enabled; bit 8 selects which
			 * 6-bit field holds the (divider - 2) value */
			if (sctl & 0x80000000) {
				if (ssrc & 0x100)
					sctl >>= 8;

				sdiv = (sctl & 0x3f) + 2;
			}
		}

		/* divider counts in half steps, hence the * 2 */
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}
    136 
/*
 * read_clk - read back the current frequency of engine clock "idx".
 *
 * 0x137100 is a bitmask selecting PLL mode (bit set) or divider mode
 * (bit clear) per engine; 0x137250 + idx*4 holds the post-divider.
 * Engines 7 and up share the PLL at 0x1370e0.
 */
static u32
read_clk(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
	u32 ssel = nvkm_rd32(device, 0x137100);
	u32 sclk, sdiv;

	if (ssel & (1 << idx)) {
		/* PLL path; the divider field sits at bits 8-13 */
		if (idx < 7)
			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
		else
			sclk = read_pll(clk, 0x1370e0);
		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
	} else {
		/* divider path; the divider field sits at bits 0-5 */
		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
	}

	/* bit 31: post-divider enabled (counts in half steps) */
	if (sctl & 0x80000000)
		return (sclk * 2) / sdiv;

	return sclk;
}
    161 
/*
 * gf100_clk_read - nvkm_clk .read entry point: report the current
 * frequency of the requested clock source, or -EINVAL for sources
 * this chipset does not expose.
 */
static int
gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gf100_clk *clk = gf100_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000;
	case nv_clk_src_sppll0:
		return read_pll(clk, 0x00e800);
	case nv_clk_src_sppll1:
		return read_pll(clk, 0x00e820);

	/* memory clock tree */
	case nv_clk_src_mpllsrcref:
		return read_div(clk, 0, 0x137320, 0x137330);
	case nv_clk_src_mpllsrc:
		return read_pll(clk, 0x132020);
	case nv_clk_src_mpll:
		return read_pll(clk, 0x132000);
	case nv_clk_src_mdiv:
		return read_div(clk, 0, 0x137300, 0x137310);
	case nv_clk_src_mem:
		/* 0x1373f0 bit 1 selects the MPLL over the divider path */
		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);

	/* engine clocks, by hardware index */
	case nv_clk_src_gpc:
		return read_clk(clk, 0x00);
	case nv_clk_src_rop:
		return read_clk(clk, 0x01);
	case nv_clk_src_hubk07:
		return read_clk(clk, 0x02);
	case nv_clk_src_hubk06:
		return read_clk(clk, 0x07);
	case nv_clk_src_hubk01:
		return read_clk(clk, 0x08);
	case nv_clk_src_copy:
		return read_clk(clk, 0x09);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x0c);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x0e);
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}
    213 
    214 static u32
    215 calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
    216 {
    217 	u32 div = min((ref * 2) / freq, (u32)65);
    218 	if (div < 2)
    219 		div = 2;
    220 
    221 	*ddiv = div - 2;
    222 	return (ref * 2) / div;
    223 }
    224 
    225 static u32
    226 calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
    227 {
    228 	u32 sclk;
    229 
    230 	/* use one of the fixed frequencies if possible */
    231 	*ddiv = 0x00000000;
    232 	switch (freq) {
    233 	case  27000:
    234 	case 108000:
    235 		*dsrc = 0x00000000;
    236 		if (freq == 108000)
    237 			*dsrc |= 0x00030000;
    238 		return freq;
    239 	case 100000:
    240 		*dsrc = 0x00000002;
    241 		return freq;
    242 	default:
    243 		*dsrc = 0x00000003;
    244 		break;
    245 	}
    246 
    247 	/* otherwise, calculate the closest divider */
    248 	sclk = read_vco(clk, 0x137160 + (idx * 4));
    249 	if (idx < 7)
    250 		sclk = calc_div(clk, idx, sclk, freq, ddiv);
    251 	return sclk;
    252 }
    253 
/*
 * calc_pll - compute PLL coefficients for engine "idx" to reach
 * "freq".  On success stores the packed P/N/M value in *coef and
 * returns the achievable frequency; returns 0 on any failure (no
 * VBIOS limits, no reference clock, or no valid coefficient set).
 */
static u32
calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvbios_pll limits;
	int N, M, P, ret;

	/* PLL operating limits come from the VBIOS PLL table */
	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
	if (ret)
		return 0;

	/* reference clock is the per-PLL divider read_pll() also uses */
	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	/* same packing the hardware coefficient register uses */
	*coef = (P << 16) | (N << 8) | M;
	return ret;
}
    277 
/*
 * calc_clk - work out divider/PLL settings to hit the frequency the
 * cstate requests for domain "dom", staging the result in
 * clk->eng[idx] for the gf100_clk_prog_* stages to apply.
 *
 * Two candidate paths are evaluated: dividers only (clk0), and a PLL
 * followed by a divider (clk1, only for engines in the 0x00004387
 * mask).  Whichever lands closer to the target wins.
 *
 * NOTE(review): ddiv/mdiv are OR'd into rather than assigned, so this
 * relies on gf100_clk_tidy() having zeroed clk->eng beforehand —
 * confirm callers always tidy between state changes.
 */
static int
calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	u32 freq = cstate->domain[dom];
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(clk, idx, freq, &src0, &div0);
	clk0 = calc_div(clk, idx, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x00004387 & (1 << idx))) {
		if (idx <= 7)
			clk1 = calc_pll(clk, idx, freq, &info->coef);
		else
			/* engines without their own PLL track hubk06 */
			clk1 = cstate->domain[nv_clk_src_hubk06];
		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;
			info->ddiv |= div0 << 8;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		/* divider path chosen: make sure prog stages skip the PLL */
		info->ssel = info->coef = 0;
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << idx);
		info->freq = clk1;
	}

	return 0;
}
    328 
    329 static int
    330 gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
    331 {
    332 	struct gf100_clk *clk = gf100_clk(base);
    333 	int ret;
    334 
    335 	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
    336 	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
    337 	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
    338 	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
    339 	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
    340 	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
    341 	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_pmu)) ||
    342 	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
    343 		return ret;
    344 
    345 	return 0;
    346 }
    347 
/*
 * Stage 0: program the clock source and pre-divider for engines that
 * will run in divider mode.  Only the first seven engines have these
 * registers, hence the idx < 7 check.
 */
static void
gf100_clk_prog_0(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (idx < 7 && !info->ssel) {
		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
	}
}
    358 
/*
 * Stage 1: switch the engine to divider mode by clearing its bit in
 * 0x137100, then wait up to 2ms for the hardware to acknowledge.
 */
static void
gf100_clk_prog_1(struct gf100_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
			break;
	);
}
    369 
/*
 * Stage 2: reprogram the engine's PLL.  The PLL is always disabled
 * first; new coefficients are written and the PLL re-enabled only if
 * calc_clk() chose the PLL path (info->coef != 0).  Engines above 7
 * have no PLL of their own and are skipped.
 */
static void
gf100_clk_prog_2(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 addr = 0x137000 + (idx * 0x20);
	if (idx <= 7) {
		/* drop sync mode and disable the PLL before touching it */
		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
		if (info->coef) {
			nvkm_wr32(device, addr + 0x04, info->coef);
			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);

			/* Test PLL lock */
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
					break;
			);
			nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);

			/* Enable sync mode */
			nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
		}
	}
}
    396 
/*
 * Stage 3: if the PLL path was chosen, switch the engine to PLL mode
 * by setting its bit in 0x137100, then wait up to 2ms for the switch
 * to take effect.
 */
static void
gf100_clk_prog_3(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	if (info->ssel) {
		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
		nvkm_msec(device, 2000,
			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
			if (tmp == info->ssel)
				break;
		);
	}
}
    411 
/*
 * Stage 4: program the final post-divider (both the PLL-mode and
 * divider-mode 6-bit fields) at 0x137250 + idx*4.
 */
static void
gf100_clk_prog_4(struct gf100_clk *clk, int idx)
{
	struct gf100_clk_info *info = &clk->eng[idx];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
}
    419 
    420 static int
    421 gf100_clk_prog(struct nvkm_clk *base)
    422 {
    423 	struct gf100_clk *clk = gf100_clk(base);
    424 	struct {
    425 		void (*exec)(struct gf100_clk *, int);
    426 	} stage[] = {
    427 		{ gf100_clk_prog_0 }, /* div programming */
    428 		{ gf100_clk_prog_1 }, /* select div mode */
    429 		{ gf100_clk_prog_2 }, /* (maybe) program pll */
    430 		{ gf100_clk_prog_3 }, /* (maybe) select pll mode */
    431 		{ gf100_clk_prog_4 }, /* final divider */
    432 	};
    433 	int i, j;
    434 
    435 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
    436 		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
    437 			if (!clk->eng[j].freq)
    438 				continue;
    439 			stage[i].exec(clk, j);
    440 		}
    441 	}
    442 
    443 	return 0;
    444 }
    445 
    446 static void
    447 gf100_clk_tidy(struct nvkm_clk *base)
    448 {
    449 	struct gf100_clk *clk = gf100_clk(base);
    450 	memset(clk->eng, 0x00, sizeof(clk->eng));
    451 }
    452 
/*
 * Entry points and clock-domain description for the GF100 clock
 * subdev.  The second field of each domain entry presumably maps to a
 * performance-table index (0xff for crystal/href looks like "no
 * entry") — TODO confirm against the nvkm_clk domain definition.
 */
static const struct nvkm_clk_func
gf100_clk = {
	.read = gf100_clk_read,
	.calc = gf100_clk_calc,
	.prog = gf100_clk_prog,
	.tidy = gf100_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_hubk06 , 0x00 },
		{ nv_clk_src_hubk01 , 0x01 },
		{ nv_clk_src_copy   , 0x02 },
		{ nv_clk_src_gpc    , 0x03, NVKM_CLK_DOM_FLAG_VPSTATE, "core", 2000 },
		{ nv_clk_src_rop    , 0x04 },
		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
		{ nv_clk_src_vdec   , 0x06 },
		{ nv_clk_src_pmu    , 0x0a },
		{ nv_clk_src_hubk07 , 0x0b },
		{ nv_clk_src_max }
	}
};
    474 
    475 int
    476 gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
    477 {
    478 	struct gf100_clk *clk;
    479 
    480 	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
    481 		return -ENOMEM;
    482 	*pclk = &clk->base;
    483 
    484 	return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
    485 }
    486