/*	$NetBSD: nouveau_nvkm_subdev_pmu_gm20b.c,v 1.2 2021/12/18 23:45:41 riastradh Exp $	*/

/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_pmu_gm20b.c,v 1.2 2021/12/18 23:45:41 riastradh Exp $");

#include "priv.h"

#include <core/memory.h>
#include <subdev/acr.h>

#include <nvfw/flcn.h>
#include <nvfw/pmu.h>

/* Completion callback: the PMU echoes back the ID of the falcon it booted. */
static int
gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nv_falcon_msg *hdr)
{
	struct nv_pmu_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	return msg->falcon_id;
}

int
gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
			       enum nvkm_acr_lsf_id id)
{
	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
	struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
		.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
		.falcon_id = id,
	};
	int ret;

	/* The callback above returns the booted falcon's ID on success;
	 * a non-negative reply that doesn't match the requested ID means
	 * the PMU bootstrapped the wrong falcon. */
	ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
				    gm20b_pmu_acr_bootstrap_falcon_cb,
				    &pmu->subdev, msecs_to_jiffies(1000));
	if (ret >= 0) {
		if (ret != cmd.falcon_id)
			ret = -EIO;
		else
			ret = 0;
	}

	return ret;
}

int
gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
{
	struct nv_pmu_args args = { .secure_mode = true };
	/* The boot arguments live at the very end of the PMU's DMEM. */
	const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
	nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
	nvkm_falcon_start(falcon);
	return 0;
}

void
gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct loader_config hdr;
	u64 addr;

	/* Each DMA address is stored 256-byte aligned across two 32-bit
	 * fields: base holds address bits 8..39, base1 the bits above.
	 * Reassemble each address, apply the adjustment, split again. */
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
	hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
	hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
	hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
	hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
	hdr.overlay_dma_base = lower_32_bits((addr + adjust) >> 8);
	hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));

	loader_config_dump(&acr->subdev, &hdr);
}
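/*
 * Worked example of the base/base1 split used above and in
 * gm20b_pmu_acr_bld_write() below (illustrative values only, not from
 * the original file): a 256-byte-aligned DMA address such as the
 * hypothetical 0x12_3456_7800 round-trips as
 *
 *	base  = lower_32_bits(0x1234567800ULL >> 8) = 0x12345678
 *	base1 = upper_32_bits(0x1234567800ULL >> 8) = 0x00000000
 *	addr  = (u64)base1 << 40 | (u64)base << 8   = 0x1234567800
 *
 * so base carries address bits 8..39 and base1 the bits above them.
 */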
void
gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
	const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
	const struct loader_config hdr = {
		.dma_idx = FALCON_DMAIDX_UCODE,
		.code_dma_base = lower_32_bits(code),
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lower_32_bits(data),
		.data_size = lsfw->app_resident_data_size,
		.overlay_dma_base = lower_32_bits(code),
		.argc = 1,
		/* Must match the DMEM address used by gm20b_pmu_acr_boot(). */
		.argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
		.code_dma_base1 = upper_32_bits(code),
		.data_dma_base1 = upper_32_bits(data),
		.overlay_dma_base1 = upper_32_bits(code),
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

static const struct nvkm_acr_lsf_func
gm20b_pmu_acr = {
	.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
	.bld_size = sizeof(struct loader_config),
	.bld_write = gm20b_pmu_acr_bld_write,
	.bld_patch = gm20b_pmu_acr_bld_patch,
	.boot = gm20b_pmu_acr_boot,
	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};

static int
gm20b_pmu_acr_init_wpr_callback(void *priv, struct nv_falcon_msg *hdr)
{
	struct nv_pmu_acr_init_wpr_region_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	struct nvkm_pmu *pmu = priv;
	struct nvkm_subdev *subdev = &pmu->subdev;

	if (msg->error_code) {
		nvkm_error(subdev, "ACR WPR init failure: %d\n",
			   msg->error_code);
		return -EINVAL;
	}

	nvkm_debug(subdev, "ACR WPR init complete\n");
	complete_all(&pmu->wpr_ready);
	return 0;
}

static int
gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
{
	struct nv_pmu_acr_init_wpr_region_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
		.region_id = 1,
		.wpr_offset = 0,
	};

	return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
				     gm20b_pmu_acr_init_wpr_callback, pmu, 0);
}

int
gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
{
	struct nv_pmu_init_msg msg;
	int ret;

	ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
	    msg.msg_type != NV_PMU_INIT_MSG_INIT)
		return -EINVAL;

	/* queue_info[0] describes the high-priority command queue,
	 * queue_info[1] the low-priority command queue, and
	 * queue_info[4] the message queue. */
	nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
			      msg.queue_info[0].offset,
			      msg.queue_info[0].size);
	nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
			      msg.queue_info[1].offset,
			      msg.queue_info[1].size);
	nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
			      msg.queue_info[4].offset,
			      msg.queue_info[4].size);
	return gm20b_pmu_acr_init_wpr(pmu);
}

void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
	/* The first message out of the PMU must be the init message;
	 * nothing else can be processed until it has been parsed. */
	if (!pmu->initmsg_received) {
		int ret = pmu->func->initmsg(pmu);
		if (ret) {
			nvkm_error(&pmu->subdev,
				   "error parsing init message: %d\n", ret);
			return;
		}

		pmu->initmsg_received = true;
	}

	nvkm_falcon_msgq_recv(pmu->msgq);
}
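/*
 * Sketch of the boot handshake the functions above implement (a summary
 * drawn from this file, not text from the original):
 *
 *	1. The PMU falcon starts and posts NV_PMU_INIT_MSG_INIT.
 *	2. gm20b_pmu_recv() -> gm20b_pmu_initmsg() parses it and sets up
 *	   the command/message queues from msg.queue_info[].
 *	3. gm20b_pmu_acr_init_wpr() tells the PMU where the WPR region
 *	   is; its reply completes pmu->wpr_ready, which later ACR
 *	   operations can presumably synchronize on.
 *	4. gm20b_pmu_acr_bootstrap_falcon() can then ask the PMU to boot
 *	   other light-secure falcons.
 */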
static const struct nvkm_pmu_func
gm20b_pmu = {
	.flcn = &gt215_pmu_flcn,
	.enabled = gf100_pmu_enabled,
	.intr = gt215_pmu_intr,
	.recv = gm20b_pmu_recv,
	.initmsg = gm20b_pmu_initmsg,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
#endif

int
gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
						 NVKM_ACR_LSF_PMU, "pmu/",
						 ver, fwif->acr);
}

static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
	{ 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
	{}
};

int
gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
}
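/*
 * Sketch of how gm20b_pmu_fwif[] above is consumed (an assumption based
 * on the common nvkm fwif pattern, not something this file spells out):
 * nvkm_pmu_new_() hands the table to the generic firmware loader, which
 * tries each entry in order until one loads. Entry 0 invokes
 * gm20b_pmu_load() to pull the "pmu/" desc/image/sig firmware files and,
 * on success, binds gm20b_pmu as the PMU functions and gm20b_pmu_acr as
 * its ACR light-secure-falcon functions.
 */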