/*	$NetBSD: nouveau_nvkm_subdev_clk_gk20a.c,v 1.4 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_gk20a.c,v 1.4 2021/12/18 23:45:39 riastradh Exp $");

#include "priv.h"
#include "gk20a.h"

#include <core/tegra.h>
#include <subdev/timer.h>

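/*
 * The GPCPLL post-divider is programmed as an encoded "PL" code rather
 * than as a raw divide ratio.  This table gives the divider selected by
 * each code; the mapping is not monotonic (codes 9 and 11 both select
 * /16 while code 10 selects /12), so conversions in both directions go
 * through the helpers below.
 */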
static const u8 _pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

static u32 pl_to_div(u32 pl)
{
	if (pl >= ARRAY_SIZE(_pl_to_div))
		return 1;

	return _pl_to_div[pl];
}

static u32 div_to_pl(u32 div)
{
	u32 pl;

	for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
		if (_pl_to_div[pl] >= div)
			return pl;
	}

	return ARRAY_SIZE(_pl_to_div) - 1;
}

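/*
 * GPCPLL operating limits.  All frequencies are in kHz, matching the kHz
 * arithmetic in gk20a_pllg_calc_mnp() below: the VCO must stay within
 * 1.0-2.064 GHz and the post-M reference (u = ref / M) within 12-38 MHz.
 * min_pl/max_pl bound the raw post-divider ratio before it is converted
 * to a PL code.
 */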
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
	.min_vco = 1000000, .max_vco = 2064000,
	.min_u = 12000, .max_u = 38000,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 32,
};

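/* Read back / program the M, N and PL bitfields of GPCPLL_COEFF. */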
void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = nvkm_rd32(device, GPCPLL_COEFF);
	pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
	pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}

void
gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
	val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
	val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);
}

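/*
 * The post-divided PLL output is gpc2clk, which runs at twice the GPC
 * clock, so the GPC clock computed here is
 *
 *	rate = parent_rate * N / (M * div(PL)) / 2
 *
 * With illustrative numbers only: a 19.2 MHz parent, M = 1, N = 106 and
 * PL = 1 (div 2) give a 19.2 * 106 = 2035.2 MHz VCO and a
 * 2035.2 / 2 / 2 = 508.8 MHz GPC clock.
 */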
u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
	u64 rate;
	u32 divider;

	/* widen before multiplying so parent_rate * N cannot overflow 32 bits */
	rate = (u64)clk->parent_rate * pll->n;
	divider = pll->m * clk->pl_to_div(pll->pl);

	return rate / divider / 2;
}

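/*
 * Compute M, N and PL for a target GPC clock rate (in Hz).  The initial
 * VCO target gets a 2% margin when choosing the range of post-dividers
 * to try.  For every divider from low_pl to high_pl and every legal M,
 * the one or two N values bracketing the wanted VCO frequency are
 * evaluated, and the combination whose post-divided output is closest to
 * the target wins; since low dividers are tried first, ties go to the
 * lowest usable VCO frequency.
 */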
int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
		    struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 target_clk_f, ref_clk_f, target_freq;
	u32 min_vco_f, max_vco_f;
	u32 low_pl, high_pl, best_pl;
	u32 target_vco_f;
	u32 best_m, best_n;
	u32 best_delta = ~0;
	u32 pl;

	target_clk_f = rate * 2 / KHZ;
	ref_clk_f = clk->parent_rate / KHZ;

	target_vco_f = target_clk_f + target_clk_f / 50;
	max_vco_f = max(clk->params->max_vco, target_vco_f);
	min_vco_f = clk->params->min_vco;
	best_m = clk->params->max_m;
	best_n = clk->params->min_n;
	best_pl = clk->params->min_pl;

	/* min_pl <= high_pl <= max_pl */
	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_pl = min(high_pl, clk->params->max_pl);
	high_pl = max(high_pl, clk->params->min_pl);
	high_pl = clk->div_to_pl(high_pl);

	/* min_pl <= low_pl <= max_pl */
	low_pl = min_vco_f / target_vco_f;
	low_pl = min(low_pl, clk->params->max_pl);
	low_pl = max(low_pl, clk->params->min_pl);
	low_pl = clk->div_to_pl(low_pl);

	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
		   clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));

	/* Select lowest possible VCO */
	for (pl = low_pl; pl <= high_pl; pl++) {
		u32 m, n, n2;

		target_vco_f = target_clk_f * clk->pl_to_div(pl);

		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
			u32 u_f = ref_clk_f / m;

			if (u_f < clk->params->min_u)
				break;
			if (u_f > clk->params->max_u)
				continue;

			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

			if (n > clk->params->max_n)
				break;

			for (; n <= n2; n++) {
				u32 vco_f;

				if (n < clk->params->min_n)
					continue;
				if (n > clk->params->max_n)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					u32 delta, lwv;

					lwv = (vco_f + (clk->pl_to_div(pl) / 2))
						/ clk->pl_to_div(pl);
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_m = m;
						best_n = n;
						best_pl = pl;

						if (best_delta == 0)
							goto found_match;
					}
				}
			}
		}
	}

found_match:
	WARN_ON(best_delta == ~0);

	if (best_delta != 0)
		nvkm_debug(subdev,
			   "no best match for target @ %dMHz on gpc_pll",
			   target_clk_f / KHZ);

	pll->m = best_m;
	pll->n = best_n;
	pll->pl = best_pl;

	target_freq = gk20a_pllg_calc_rate(clk, pll);

	nvkm_debug(subdev,
		   "actual target freq %d kHz, M %d, N %d, PL %d(div%d)\n",
		   target_freq / KHZ, pll->m, pll->n, pll->pl,
		   clk->pl_to_div(pll->pl));
	return 0;
}

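/*
 * Change NDIV without dropping the PLL lock.  The hardware ramps the
 * feedback divider itself: enter slowdown mode, latch the new NDIV,
 * trigger the dynamic ramp, wait (up to 500us) for the broadcast "done"
 * status, then leave slowdown mode.
 */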
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll pll;
	int ret = 0;

	/* get old coefficients */
	gk20a_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n == pll.n)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	pll.n = n;
	udelay(1);
	gk20a_pllg_write_mnp(clk, &pll);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	return ret;
}

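/*
 * Power the PLL up: set the enable bit, make sure lock detection is on,
 * wait (up to 300us) for lock, then switch GPC2CLK from the bypass path
 * to the VCO.
 */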
static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* enable lock detection */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	/* wait for lock */
	if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
			   GPCPLL_CFG_LOCK) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}

static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}

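/*
 * Full reprogramming path.  The PLL must be disabled while M/N/PL change,
 * so the output temporarily drops to the bypass clock; halving the output
 * divider around the switch splits the frequency jump seen downstream.
 */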
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll cur_pll;
	int ret;

	gk20a_pllg_read_mnp(clk, &cur_pll);

	/* split VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);

	gk20a_pllg_disable(clk);

	gk20a_pllg_write_mnp(clk, pll);

	ret = gk20a_pllg_enable(clk);
	if (ret)
		return ret;

	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);

	return 0;
}

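/*
 * Preferred reprogramming path.  If only NDIV changes, slide to it
 * directly; otherwise slide down to NDIV_LO, reprogram M and PL at the
 * minimum VCO frequency, and slide back up to the final NDIV.
 */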
static int
gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct gk20a_pll cur_pll;
	int ret;

	if (gk20a_pllg_is_enabled(clk)) {
		gk20a_pllg_read_mnp(clk, &cur_pll);

		/* just do NDIV slide if there is no change to M and PL */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gk20a_pllg_slide(clk, pll->n);

		/* slide down to current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
		ret = gk20a_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
	ret = gk20a_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to new NDIV */
	return gk20a_pllg_slide(clk, pll->n);
}

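/*
 * Performance levels exposed to the clock core, with GPC frequencies in
 * the domain's kHz units (72-852 MHz) and a voltage level for each step.
 */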
static struct nvkm_pstate
gk20a_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 72000,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 108000,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 180000,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 252000,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 324000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 396000,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 468000,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 540000,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 612000,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 648000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 684000,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 708000,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 756000,
			.voltage = 12,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 804000,
			.voltage = 13,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 852000,
			.voltage = 14,
		},
	},
};

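/*
 * nvkm_clk methods: read back the current rate of a clock source,
 * compute PLL settings for a requested cstate, and program them
 * (preferring the glitchless slide, with a full reprogram as fallback).
 */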
int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll pll;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_gpc:
		gk20a_pllg_read_mnp(clk, &pll);
		return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
					 GK20A_CLK_GPC_MDIV, &clk->pll);
}

int
gk20a_clk_prog(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	int ret;

	ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
	if (ret)
		ret = gk20a_pllg_program_mnp(clk, &clk->pll);

	return ret;
}

void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}

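/*
 * Program the ramp coefficients (STEPA/STEPB) used by the NDIV slide
 * hardware.  The values depend on the parent (reference) clock rate, and
 * only the rates listed here are supported.
 */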
int
gk20a_clk_setup_slide(struct gk20a_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 step_a, step_b;

	switch (clk->parent_rate) {
	case 12000000:
	case 12800000:
	case 13000000:
		step_a = 0x2b;
		step_b = 0x0b;
		break;
	case 19200000:
		step_a = 0x12;
		step_b = 0x08;
		break;
	case 38400000:
		step_a = 0x04;
		step_b = 0x05;
		break;
	default:
		nvkm_error(subdev, "invalid parent clock rate %u kHz",
			   clk->parent_rate / KHZ);
		return -EINVAL;
	}

	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
		step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
		step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);

	return 0;
}

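/*
 * Shutdown: slide down to the minimum VCO frequency before disabling the
 * PLL, then assert IDDQ to power the PLL down.
 */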
void
gk20a_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gk20a_clk *clk = gk20a_clk(base);

	/* slide to VCO min */
	if (gk20a_pllg_is_enabled(clk)) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(clk, &pll);
		n_lo = gk20a_pllg_n_lo(clk, &pll);
		gk20a_pllg_slide(clk, n_lo);
	}

	gk20a_pllg_disable(clk);

	/* set IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}

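/*
 * Bring the PLL out of IDDQ, restore the GPC2CLK output defaults, program
 * the slide coefficients, and start at the lowest performance level.
 */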
static int
gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);
	udelay(5);

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}

static const struct nvkm_clk_func
gk20a_clk = {
	.init = gk20a_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gk20a_pstates,
	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max }
	}
};

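/*
 * Common constructor: initialize the pstate list, cache the PLL limits
 * and the Tegra parent clock rate, then call the base nvkm_clk
 * constructor.  Taking func and params as arguments lets chip variants
 * (e.g. GM20B) reuse this with their own tables.
 */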
int
gk20a_clk_ctor(struct nvkm_device *device, int index,
		const struct nvkm_clk_func *func,
		const struct gk20a_clk_pllg_params *params,
		struct gk20a_clk *clk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	int ret;
	int i;

	/* Finish initializing the pstates */
	for (i = 0; i < func->nr_pstates; i++) {
		INIT_LIST_HEAD(&func->pstates[i].list);
		func->pstates[i].pstate = i + 1;
	}

	clk->params = params;
	clk->parent_rate = clk_get_rate(tdev->clk);

	ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
	if (ret)
		return ret;

	nvkm_debug(&clk->base.subdev, "parent clock rate: %d kHz\n",
		   clk->parent_rate / KHZ);

	return 0;
}

int
gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
			      clk);

	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;

	return ret;
}
    665