Home | History | Annotate | Line # | Download | only in gr
      1 /*	$NetBSD: nouveau_nvkm_engine_gr_gk20a.c,v 1.4 2021/12/19 10:51:57 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     22  * DEALINGS IN THE SOFTWARE.
     23  */
     24 #include <sys/cdefs.h>
     25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_gr_gk20a.c,v 1.4 2021/12/19 10:51:57 riastradh Exp $");
     26 
     27 #include "gf100.h"
     28 #include "ctxgf100.h"
     29 
     30 #include <core/firmware.h>
     31 #include <subdev/timer.h>
     32 
     33 #include <nvif/class.h>
     34 
/* One record of an "av" (address/value) firmware list: a register or
 * method address paired with the value to program.  The layout must match
 * the binary firmware image byte-for-byte, since blobs are reinterpreted
 * as arrays of this struct. */
struct gk20a_fw_av
{
	u32 addr;
	u32 data;
};
     40 
     41 static int
     42 gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
     43 		    int ver, struct gf100_gr_pack **ppack)
     44 {
     45 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
     46 	struct nvkm_blob blob;
     47 	struct gf100_gr_init *init;
     48 	struct gf100_gr_pack *pack;
     49 	int nent;
     50 	int ret;
     51 	int i;
     52 
     53 	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
     54 	if (ret)
     55 		return ret;
     56 
     57 	nent = (blob.size / sizeof(struct gk20a_fw_av));
     58 	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
     59 	if (!pack) {
     60 		ret = -ENOMEM;
     61 		goto end;
     62 	}
     63 
     64 	init = (void *)(pack + 2);
     65 	pack[0].init = init;
     66 
     67 	for (i = 0; i < nent; i++) {
     68 		struct gf100_gr_init *ent = &init[i];
     69 		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
     70 
     71 		ent->addr = av->addr;
     72 		ent->data = av->data;
     73 		ent->count = 1;
     74 		ent->pitch = 1;
     75 	}
     76 
     77 	*ppack = pack;
     78 
     79 end:
     80 	nvkm_blob_dtor(&blob);
     81 	return ret;
     82 }
     83 
/* One record of an "aiv" (address/index/value) firmware list.  As with
 * gk20a_fw_av, the layout must match the binary firmware image exactly. */
struct gk20a_fw_aiv
{
	u32 addr;
	u32 index;
	u32 data;
};
     90 
     91 static int
     92 gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
     93 		     int ver, struct gf100_gr_pack **ppack)
     94 {
     95 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
     96 	struct nvkm_blob blob;
     97 	struct gf100_gr_init *init;
     98 	struct gf100_gr_pack *pack;
     99 	int nent;
    100 	int ret;
    101 	int i;
    102 
    103 	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
    104 	if (ret)
    105 		return ret;
    106 
    107 	nent = (blob.size / sizeof(struct gk20a_fw_aiv));
    108 	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
    109 	if (!pack) {
    110 		ret = -ENOMEM;
    111 		goto end;
    112 	}
    113 
    114 	init = (void *)(pack + 2);
    115 	pack[0].init = init;
    116 
    117 	for (i = 0; i < nent; i++) {
    118 		struct gf100_gr_init *ent = &init[i];
    119 		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i];
    120 
    121 		ent->addr = av->addr;
    122 		ent->data = av->data;
    123 		ent->count = 1;
    124 		ent->pitch = 1;
    125 	}
    126 
    127 	*ppack = pack;
    128 
    129 end:
    130 	nvkm_blob_dtor(&blob);
    131 	return ret;
    132 }
    133 
/* Load an "av" firmware list and split it into per-class method packs:
 * each record's low 16 address bits select the object class, and a new
 * pack entry is started whenever the class changes.  On success *ppack
 * owns one vzalloc'd buffer holding pack array and init entries (vfree
 * to release); fails with -ENOSPC if more than max_classes distinct
 * classes appear.  The firmware blob is always freed before returning. */
static int
gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
		      int ver, struct gf100_gr_pack **ppack)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_blob blob;
	struct gf100_gr_init *init;
	struct gf100_gr_pack *pack;
	/* We don't suppose we will initialize more than 16 classes here... */
	static const unsigned int max_classes = 16;
	u32 classidx = 0, prevclass = 0;
	int nent;
	int ret;
	int i;

	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
	if (ret)
		return ret;

	nent = (blob.size / sizeof(struct gk20a_fw_av));

	/* Worst case: one pack per class plus a terminating pack, and the
	 * nent init entries plus one terminating init per class. */
	pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
		       (sizeof(*init) * (nent + max_classes + 1)));
	if (!pack) {
		ret = -ENOMEM;
		goto end;
	}

	/* The init entries live directly after the pack array. */
	init = (void *)(pack + max_classes + 1);

	for (i = 0; i < nent; i++, init++) {
		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
		/* Low 16 bits: class id.  High 16 bits: method offset,
		 * scaled by 4 (>> 16 then << 2, i.e. >> 14). */
		u32 class = av->addr & 0xffff;
		u32 addr = (av->addr & 0xffff0000) >> 14;

		if (prevclass != class) {
			if (prevclass) /* Add terminator to the method list. */
				init++;
			pack[classidx].init = init;
			pack[classidx].type = class;
			prevclass = class;
			if (++classidx >= max_classes) {
				vfree(pack);
				ret = -ENOSPC;
				goto end;
			}
		}

		init->addr = addr;
		init->data = av->data;
		init->count = 1;
		init->pitch = 1;
	}

	*ppack = pack;

end:
	nvkm_blob_dtor(&blob);
	return ret;
}
    194 
/* Wait for the FECS and GPCCS falcons to finish scrubbing their memories.
 * Polls bits 1-2 of 0x40910c (FECS) and 0x41a10c (GPCCS) until both clear,
 * with a 2000-unit timeout each (nvkm_msec — presumably milliseconds).
 * Returns 0 on success, -ETIMEDOUT if either unit fails to settle. */
static int
gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* FECS first... */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "FECS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	/* ...then GPCCS. */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
    219 
/* Program the SM hardware-warning (HWW) error-status-report masks.
 * Register meanings are not visible here — NOTE(review): 0x419e44 /
 * 0x419e4c presumably select which SM exceptions are reported; confirm
 * against the gk20a register manual. */
static void
gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x419e44, 0x1ffffe);
	nvkm_wr32(device, 0x419e4c, 0x7f);
}
    227 
/* Bring up the gk20a GR engine: program the non-context register list,
 * wait for falcon memory scrubbing and engine idle, then configure MMU
 * debug, zcull, ROPs, interrupts and exceptions before handing off to
 * the context-control firmware.  The write sequence is order-sensitive;
 * do not reorder.  Returns 0 on success or a negative errno. */
int
gk20a_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret;

	/* Clear SCC RAM */
	nvkm_wr32(device, 0x40802c, 0x1);

	/* Replay the "sw_nonctx" register list loaded from firmware. */
	gf100_gr_mmio(gr, gr->sw_nonctx);

	ret = gk20a_gr_wait_mem_scrubbing(gr);
	if (ret)
		return ret;

	ret = gf100_gr_wait_idle(gr);
	if (ret)
		return ret;

	/* MMU debug buffer */
	if (gr->func->init_gpc_mmu)
		gr->func->init_gpc_mmu(gr);

	/* Set the PE as stream master */
	nvkm_mask(device, 0x503018, 0x1, 0x1);

	/* Zcull init */
	gr->func->init_zcull(gr);

	gr->func->init_rop_active_fbps(gr);

	/* Enable FIFO access */
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* Enable interrupts */
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);

	/* Enable FECS error interrupts */
	nvkm_wr32(device, 0x409c24, 0x000f0000);

	/* Enable hardware warning exceptions */
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);

	if (gr->func->set_hww_esr_report_mask)
		gr->func->set_hww_esr_report_mask(gr);

	/* Enable TPC exceptions per GPC */
	nvkm_wr32(device, 0x419d0c, 0x2);
	/* One mask bit per TPC, capped at 8 bits. */
	nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);

	/* Reset and enable all exceptions */
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	gf100_gr_zbc_init(gr);

	/* Upload and start the FECS/GPCCS context-control firmware. */
	return gf100_gr_init_ctxctl(gr);
}
    292 
/* Engine description for gk20a (Tegra K1): mostly reuses gf100/gf117/gk104
 * helpers, with the gk20a-specific init and HWW mask routines above, and
 * the Kepler object classes this GPU exposes. */
static const struct gf100_gr_func
gk20a_gr = {
	.oneinit_tiles = gf100_gr_oneinit_tiles,
	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
	.init = gk20a_gr_init,
	.init_zcull = gf117_gr_init_zcull,
	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
	.trap_mp = gf100_gr_trap_mp,
	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
	.rops = gf100_gr_rops,
	.ppc_nr = 1,
	.grctx = &gk20a_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
		{ -1, -1, KEPLER_C, &gf100_fermi },
		{ -1, -1, KEPLER_COMPUTE_A },
		{}
	}
};
    314 
    315 int
    316 gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
    317 {
    318 	if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) ||
    319 	    gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) ||
    320 	    gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) ||
    321 	    gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method))
    322 		return -ENOENT;
    323 
    324 	return 0;
    325 }
    326 
    327 static int
    328 gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
    329 {
    330 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
    331 
    332 	if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver,
    333 				    &gr->fecs.inst) ||
    334 	    nvkm_firmware_load_blob(subdev, "", "fecs_data", ver,
    335 				    &gr->fecs.data) ||
    336 	    nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver,
    337 				    &gr->gpccs.inst) ||
    338 	    nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver,
    339 				    &gr->gpccs.data))
    340 		return -ENOENT;
    341 
    342 	gr->firmware = true;
    343 
    344 	return gk20a_gr_load_sw(gr, "", ver);
    345 }
    346 
/* Firmware interface table: a single catch-all entry (version -1) that
 * loads via gk20a_gr_load with the gk20a engine description. */
static const struct gf100_gr_fwif
gk20a_gr_fwif[] = {
	{ -1, gk20a_gr_load, &gk20a_gr },
	{}
};
    352 
/* Constructor entry point: instantiate the GR engine for gk20a by
 * delegating to the common gf100 constructor with our fwif table. */
int
gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr);
}
    358