      1 /*	$NetBSD: nouveau_nvkm_engine_sec2_gp102.c,v 1.2 2021/12/18 23:45:37 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     22  * DEALINGS IN THE SOFTWARE.
     23  */
     24 #include <sys/cdefs.h>
     25 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_sec2_gp102.c,v 1.2 2021/12/18 23:45:37 riastradh Exp $");
     26 
     27 #include "priv.h"
     28 
     29 #include <core/memory.h>
     30 #include <subdev/acr.h>
     31 #include <subdev/timer.h>
     32 
     33 #include <nvfw/flcn.h>
     34 #include <nvfw/sec2.h>
     35 
     36 static int
     37 gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nv_falcon_msg *hdr)
     38 {
     39 	struct nv_sec2_acr_bootstrap_falcon_msg *msg =
     40 		container_of(hdr, typeof(*msg), msg.hdr);
     41 	struct nvkm_subdev *subdev = priv;
     42 	const char *name = nvkm_acr_lsf_id(msg->falcon_id);
     43 
     44 	if (msg->error_code) {
     45 		nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for "
     46 				   "falcon %d [%s]: %08x\n",
     47 			   msg->falcon_id, name, msg->error_code);
     48 		return -EINVAL;
     49 	}
     50 
     51 	nvkm_debug(subdev, "%s booted\n", name);
     52 	return 0;
     53 }
     54 
     55 static int
     56 gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
     57 			        enum nvkm_acr_lsf_id id)
     58 {
     59 	struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
     60 	struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
     61 		.cmd.hdr.unit_id = sec2->func->unit_acr,
     62 		.cmd.hdr.size = sizeof(cmd),
     63 		.cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
     64 		.flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
     65 		.falcon_id = id,
     66 	};
     67 
     68 	return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
     69 				     gp102_sec2_acr_bootstrap_falcon_callback,
     70 				     &sec2->engine.subdev,
     71 				     msecs_to_jiffies(1000));
     72 }
     73 
     74 static int
     75 gp102_sec2_acr_boot(struct nvkm_falcon *falcon)
     76 {
     77 	struct nv_sec2_args args = {};
     78 	nvkm_falcon_load_dmem(falcon, &args,
     79 			      falcon->func->emem_addr, sizeof(args), 0);
     80 	nvkm_falcon_start(falcon);
     81 	return 0;
     82 }
     83 
     84 static void
     85 gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
     86 {
     87 	struct loader_config_v1 hdr;
     88 	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
     89 	hdr.code_dma_base = hdr.code_dma_base + adjust;
     90 	hdr.data_dma_base = hdr.data_dma_base + adjust;
     91 	hdr.overlay_dma_base = hdr.overlay_dma_base + adjust;
     92 	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
     93 	loader_config_v1_dump(&acr->subdev, &hdr);
     94 }
     95 
     96 static void
     97 gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
     98 			 struct nvkm_acr_lsfw *lsfw)
     99 {
    100 	const struct loader_config_v1 hdr = {
    101 		.dma_idx = FALCON_SEC2_DMAIDX_UCODE,
    102 		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
    103 		.code_size_total = lsfw->app_size,
    104 		.code_size_to_load = lsfw->app_resident_code_size,
    105 		.code_entry_point = lsfw->app_imem_entry,
    106 		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
    107 				 lsfw->app_resident_data_offset,
    108 		.data_size = lsfw->app_resident_data_size,
    109 		.overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
    110 		.argc = 1,
    111 		.argv = lsfw->falcon->func->emem_addr,
    112 	};
    113 
    114 	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
    115 }
    116 
/* ACR LS functions for version-0 SEC2 firmware, which uses the
 * loader_config_v1 bootloader descriptor format.
 */
static const struct nvkm_acr_lsf_func
gp102_sec2_acr_0 = {
	.bld_size = sizeof(struct loader_config_v1),
	.bld_write = gp102_sec2_acr_bld_write,
	.bld_patch = gp102_sec2_acr_bld_patch,
	.boot = gp102_sec2_acr_boot,
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};
    125 
    126 int
    127 gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
    128 {
    129 	struct nv_sec2_init_msg msg;
    130 	int ret, i;
    131 
    132 	ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
    133 	if (ret)
    134 		return ret;
    135 
    136 	if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
    137 	    msg.msg_type != NV_SEC2_INIT_MSG_INIT)
    138 		return -EINVAL;
    139 
    140 	for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
    141 		if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
    142 			nvkm_falcon_msgq_init(sec2->msgq,
    143 					      msg.queue_info[i].index,
    144 					      msg.queue_info[i].offset,
    145 					      msg.queue_info[i].size);
    146 		} else {
    147 			nvkm_falcon_cmdq_init(sec2->cmdq,
    148 					      msg.queue_info[i].index,
    149 					      msg.queue_info[i].offset,
    150 					      msg.queue_info[i].size);
    151 		}
    152 	}
    153 
    154 	return 0;
    155 }
    156 
    157 void
    158 gp102_sec2_intr(struct nvkm_sec2 *sec2)
    159 {
    160 	struct nvkm_subdev *subdev = &sec2->engine.subdev;
    161 	struct nvkm_falcon *falcon = &sec2->falcon;
    162 	u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
    163 	u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);
    164 
    165 	if (intr & 0x00000040) {
    166 		schedule_work(&sec2->work);
    167 		nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
    168 		intr &= ~0x00000040;
    169 	}
    170 
    171 	if (intr) {
    172 		nvkm_error(subdev, "unhandled intr %08x\n", intr);
    173 		nvkm_falcon_wr32(falcon, 0x004, intr);
    174 	}
    175 }
    176 
int
gp102_sec2_flcn_enable(struct nvkm_falcon *falcon)
{
	/* Pulse bit 0 of register 0x3c0 (set, wait 10us, clear) before
	 * running the common falcon v1 enable sequence.
	 */
	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
	udelay(10);
	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
	return nvkm_falcon_v1_enable(falcon);
}
    185 
/* Bind @ctx as the falcon's context via the common v1 sequence, then run
 * an additional wait/ack sequence (see comment below) required for the
 * SEC2 RTOS to boot reliably.  A NULL @ctx unbinds and skips the extras.
 */
void
gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon,
			     struct nvkm_memory *ctx)
{
	struct nvkm_device *device = falcon->owner->device;

	nvkm_falcon_v1_bind_context(falcon, ctx);
	if (!ctx)
		return;

	/* Not sure if this is a WAR for a HW issue, or some additional
	 * programming sequence that's needed to properly complete the
	 * context switch we trigger above.
	 *
	 * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
	 * particularly when resuming from suspend.
	 *
	 * Also removes the need for an odd workaround where we needed
	 * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
	 * the SEC2 RTOS would begin executing.
	 */
	/* Wait up to 10ms for IRQSTAT bit 3 with 0x0dc[14:12] == 5. */
	nvkm_msec(device, 10,
		u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
		if ((irqstat & 0x00000008) &&
		    (flcn0dc & 0x00007000) == 0x00005000)
			break;
	);

	/* Acknowledge the interrupt (0x004 bit 3) and set 0x058 bit 1. */
	nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
	nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

	/* Wait up to 10ms for 0x0dc[14:12] to return to 0. */
	nvkm_msec(device, 10,
		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
		if ((flcn0dc & 0x00007000) == 0x00000000)
			break;
	);
}
    224 
/* Falcon operations for the GP102 SEC2 engine: mostly the common v1
 * implementations, plus the GP102-specific enable and bind_context
 * sequences and the EMEM aperture/queue register layout.
 */
static const struct nvkm_falcon_func
gp102_sec2_flcn = {
	.debug = 0x408,
	.fbif = 0x600,
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.emem_addr = 0x01000000,
	.bind_context = gp102_sec2_flcn_bind_context,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
	.start = nvkm_falcon_v1_start,
	.enable = gp102_sec2_flcn_enable,
	.disable = nvkm_falcon_v1_disable,
	.cmdq = { 0xa00, 0xa04, 8 },
	.msgq = { 0xa30, 0xa34, 8 },
};
    243 
/* GP102 SEC2 engine description, shared with later chips that reuse
 * these interrupt/initmsg handlers and falcon functions.
 */
const struct nvkm_sec2_func
gp102_sec2 = {
	.flcn = &gp102_sec2_flcn,
	.unit_acr = NV_SEC2_UNIT_ACR,
	.intr = gp102_sec2_intr,
	.initmsg = gp102_sec2_initmsg,
};
    251 
    252 MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
    253 MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
    254 MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
    255 MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
    256 MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
    257 MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
    258 MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
    259 MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
    260 MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
    261 MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
    262 MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
    263 MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
    264 
    265 static void
    266 gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
    267 {
    268 	struct flcn_bl_dmem_desc_v2 hdr;
    269 	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
    270 	hdr.code_dma_base = hdr.code_dma_base + adjust;
    271 	hdr.data_dma_base = hdr.data_dma_base + adjust;
    272 	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
    273 	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
    274 }
    275 
    276 static void
    277 gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
    278 			   struct nvkm_acr_lsfw *lsfw)
    279 {
    280 	const struct flcn_bl_dmem_desc_v2 hdr = {
    281 		.ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
    282 		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
    283 		.non_sec_code_off = lsfw->app_resident_code_offset,
    284 		.non_sec_code_size = lsfw->app_resident_code_size,
    285 		.code_entry_point = lsfw->app_imem_entry,
    286 		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
    287 				 lsfw->app_resident_data_offset,
    288 		.data_size = lsfw->app_resident_data_size,
    289 		.argc = 1,
    290 		.argv = lsfw->falcon->func->emem_addr,
    291 	};
    292 
    293 	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
    294 }
    295 
/* ACR LS functions for version-1 SEC2 firmware ("*-1.bin"), which uses
 * the flcn_bl_dmem_desc_v2 bootloader descriptor format.
 */
const struct nvkm_acr_lsf_func
gp102_sec2_acr_1 = {
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp102_sec2_acr_bld_write_1,
	.bld_patch = gp102_sec2_acr_bld_patch_1,
	.boot = gp102_sec2_acr_boot,
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};
    304 
/* Load the sec2 LS firmware (sig/image/desc triple) for firmware
 * version @ver, registering it with the ACR as the SEC2 LS falcon.
 */
int
gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
		const struct nvkm_sec2_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
						    &sec2->falcon,
						    NVKM_ACR_LSF_SEC2, "sec2/",
						    ver, fwif->acr);
}
    314 
    315 MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
    316 MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
    317 MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
    318 MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
    319 MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
    320 MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
    321 MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
    322 MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
    323 MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
    324 MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
    325 MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
    326 MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
    327 
/* Firmware selection table, newest version first: "-1" firmware uses the
 * v2 descriptor functions, version 0 the v1 loader-config functions.
 */
static const struct nvkm_sec2_fwif
gp102_sec2_fwif[] = {
	{ 1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
	{ 0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
	{}
};
    334 
/* Constructor: create a GP102 SEC2 engine instance using the firmware
 * table above.
 */
int
gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2)
{
	return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2);
}
    340