/*	$NetBSD: amdgpu_nv.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_nv.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

#include <linux/nbsd-namespace.h>

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect registers accessor
 */
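/*
 * The PCIE register space is reached through an index/data register pair
 * provided by the NBIO block: the target offset is written to the index
 * register and the value is then moved through the data register.  Both
 * accessors below hold adev->pcie_idx_lock across the sequence and read
 * back what they just wrote so the posted writes land before the lock is
 * dropped.
 */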
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        WREG32(data, v);
        (void)RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, (reg));
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, (reg));
        WREG32(data, (v));
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
        return adev->clock.spll.reference_freq;
}


void nv_grbm_select(struct amdgpu_device *adev,
                    u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 grbm_gfx_cntl = 0;
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

        WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
        /* todo */
        return false;
}
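
/*
 * Read the VBIOS image through the SMUIO ROM_INDEX/ROM_DATA pair:
 * ROM_INDEX is reset to 0 and successive dwords are then pulled through
 * ROM_DATA, length_bytes rounded up to a dword.  APUs carry their VBIOS
 * inside the system BIOS image, so the read is refused for them.
 */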
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
                                  u8 *bios, u32 length_bytes)
{
        u32 *dw_ptr;
        u32 i, length_dw;

        if (bios == NULL)
                return false;
        if (length_bytes == 0)
                return false;
        /* APU vbios image is part of sbios image */
        if (adev->flags & AMD_IS_APU)
                return false;

        dw_ptr = (u32 *)bios;
        length_dw = ALIGN(length_bytes, 4) / 4;

        /* set rom index to 0 */
        WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
        /* read out the rom data */
        for (i = 0; i < length_dw; i++)
                dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

        return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0   /* TODO: will set it when SDMA header is available */
        { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
        { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
        { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
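
/*
 * Read a register under a specific SE/SH selection.  When a particular
 * shader engine or shader array is requested, the GRBM index is pointed
 * at it first and restored to broadcast afterwards; grbm_idx_mutex
 * serializes the select/read/deselect sequence.
 */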
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
                                      bool indexed, u32 se_num,
                                      u32 sh_num, u32 reg_offset)
{
        if (indexed) {
                return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
        } else {
                if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
                        return adev->gfx.config.gb_addr_config;
                return RREG32(reg_offset);
        }
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        uint32_t i;
        struct soc15_allowed_register_entry *en;

        *value = 0;
        for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
                en = &nv_allowed_read_registers[i];
                if (reg_offset !=
                    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
                        continue;

                *value = nv_get_register_value(adev,
                                nv_allowed_read_registers[i].grbm_indexed,
                                se_num, sh_num, reg_offset);
                return 0;
        }
        return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        u32 i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = nbio_v2_3_get_memsize(adev);
                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

}
#endif

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
        u32 i;
        int ret = 0;

        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        dev_info(adev->dev, "GPU mode1 reset\n");

        /* disable BM */
        pci_clear_master(adev->pdev);

        pci_save_state(adev->pdev);

        ret = psp_gpu_reset(adev);
        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");

        pci_restore_state(adev->pdev);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = adev->nbio.funcs->get_memsize(adev);

                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (smu_baco_is_support(smu))
                return true;
        else
                return false;
}
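
/*
 * Reset handling: BACO (Bus Active, Chip Off) is preferred whenever the
 * SMU reports support for it and the device is not an SR-IOV virtual
 * function; otherwise a mode1 reset is performed through the PSP, with
 * bus mastering disabled and PCI config state saved/restored around it.
 */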
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
                return AMD_RESET_METHOD_BACO;
        else
                return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{

        /* FIXME: it doesn't work since vega10 */
#if 0
        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        nv_gpu_pci_config_reset(adev);

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif
        int ret = 0;
        struct smu_context *smu = &adev->smu;

        if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
                if (!adev->in_suspend)
                        amdgpu_inc_vram_lost(adev);
                ret = smu_baco_enter(smu);
                if (ret)
                        return ret;
                ret = smu_baco_exit(smu);
                if (ret)
                        return ret;
        } else {
                if (!adev->in_suspend)
                        amdgpu_inc_vram_lost(adev);
                ret = nv_asic_mode1_reset(adev);
        }

        return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        /* todo */
        return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */
        return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
        if (pci_is_root_bus(adev->pdev->bus))
                return;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
                return;

        /* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{

        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
        adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_COMMON,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &nv_common_ip_funcs,
};
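
/*
 * Register base initialization: when IP discovery is enabled, the per-IP
 * register bases are taken from the IP discovery table; on failure (or
 * when discovery is disabled) the driver falls back to the hard-coded
 * per-ASIC legacy tables.
 */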
static int nv_reg_base_init(struct amdgpu_device *adev)
{
        int r;

        if (amdgpu_discovery) {
                r = amdgpu_discovery_reg_base_init(adev);
                if (r) {
                        DRM_WARN("failed to init reg base from ip discovery table, "
                                 "fallback to legacy init method\n");
                        goto legacy_init;
                }

                return 0;
        }

legacy_init:
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                navi10_reg_base_init(adev);
                break;
        case CHIP_NAVI14:
                navi14_reg_base_init(adev);
                break;
        case CHIP_NAVI12:
                navi12_reg_base_init(adev);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
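
/*
 * Register the IP blocks that make up a Navi1x SoC: common, GMC v10, IH,
 * PSP v11, SMU v11, display (virtual or DC), GFX v10, SDMA v5, VCN v2,
 * JPEG v2 and, when enabled, MES v10.1.  The SMU block is added either
 * right after the PSP or after SDMA, depending on whether firmware is
 * loaded through the PSP or directly.
 */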
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
        int r;

        /* Set IP register base before any HW register access */
        r = nv_reg_base_init(adev);
        if (r)
                return r;

        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

        adev->nbio.funcs->detect_hw_virt(adev);

        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_nv_virt_ops;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                if (adev->enable_mes)
                        amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
                break;
        case CHIP_NAVI12:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_rev_id(adev);
}
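
/*
 * HDP flush/invalidate helpers.  The flush is delegated to the NBIO
 * callback; the read-cache invalidate is emitted on the supplied ring
 * when it can write registers, and done through a direct MMIO write
 * otherwise.
 */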
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
        adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
                              struct amdgpu_ring *ring)
{
        if (!ring || !ring->funcs->emit_wreg) {
                WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
        } else {
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
        }
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
        return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
                              uint64_t *count0,
                              uint64_t *count1)
{
        /*TODO*/
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
        u32 sol_reg;

        if (adev->flags & AMD_IS_APU)
                return false;

        /* Check the sOS sign-of-life register to confirm that the sys
         * driver and sOS have already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
        if (sol_reg)
                return true;
#endif
        /* TODO: re-enable it when mode1 reset is functional */
        return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{

        /* TODO
         * dummy implementation for the pcie_replay_count sysfs interface
         */

        return 0;
}
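
/*
 * Assign the Navi10 doorbell layout: KIQ and the MEC compute rings come
 * first, followed by the user queue range, the two GFX rings, the SDMA
 * engines, the IH ring and the VCN rings.  The maximum assignment is
 * shifted left by one to express it in 32-bit doorbell units.
 */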
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
        adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
        adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
        adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
        adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
        adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
        adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
        adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
        adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
        adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
        adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
        adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
        adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
        adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
        adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
        adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
        adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
        adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
        adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
        adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

        adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
        adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
        .read_disabled_bios = &nv_read_disabled_bios,
        .read_bios_from_rom = &nv_read_bios_from_rom,
        .read_register = &nv_read_register,
        .reset = &nv_asic_reset,
        .reset_method = &nv_asic_reset_method,
        .set_vga_state = &nv_vga_set_state,
        .get_xclk = &nv_get_xclk,
        .set_uvd_clocks = &nv_set_uvd_clocks,
        .set_vce_clocks = &nv_set_vce_clocks,
        .get_config_memsize = &nv_get_config_memsize,
        .flush_hdp = &nv_flush_hdp,
        .invalidate_hdp = &nv_invalidate_hdp,
        .init_doorbell_index = &nv_init_doorbell_index,
        .need_full_reset = &nv_need_full_reset,
        .get_pcie_usage = &nv_get_pcie_usage,
        .need_reset_on_init = &nv_need_reset_on_init,
        .get_pcie_replay_count = &nv_get_pcie_replay_count,
        .supports_baco = &nv_asic_supports_baco,
};
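
/*
 * Early init for the common IP block: wire up the indirect register
 * accessors and the ASIC function table, point adev->rmmio_remap at the
 * MMIO hole used for HDP remapping, and set the clockgating/powergating
 * feature masks and external revision id for the detected Navi variant.
 */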
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
        adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &nv_pcie_rreg;
        adev->pcie_wreg = &nv_pcie_wreg;

        /* TODO: will add them during VCN v2 implementation */
        adev->uvd_ctx_rreg = NULL;
        adev->uvd_ctx_wreg = NULL;

        adev->didt_rreg = &nv_didt_rreg;
        adev->didt_wreg = &nv_didt_wreg;

        adev->asic_funcs = &nv_asic_funcs;

        adev->rev_id = nv_get_rev_id(adev);
        adev->external_rev_id = 0xff;
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_ATHUB;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        case CHIP_NAVI14:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_VCN_DPG;
                adev->external_rev_id = adev->rev_id + 20;
                break;
        case CHIP_NAVI12:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_GFX_RLC_LS |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_ATHUB;
                /* A guest VM reads 0xffffffff from RCC_DEV0_EPF0_STRAP0,
                 * so the rev_id and external_rev_id come out wrong.
                 * Work around it by hardcoding rev_id to 0 (the default
                 * value).
                 */
                if (amdgpu_sriov_vf(adev))
                        adev->rev_id = 0;
                adev->external_rev_id = adev->rev_id + 0xa;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_setting(adev);
                xgpu_nv_mailbox_set_irq_funcs(adev);
        }

        return 0;
}

static int nv_common_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_nv_mailbox_get_irq(adev);

        return 0;
}

static int nv_common_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_nv_mailbox_add_irq_id(adev);

        return 0;
}

static int nv_common_sw_fini(void *handle)
{
        return 0;
}

static int nv_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* enable pcie gen2/3 link */
        nv_pcie_gen3_enable(adev);
        /* enable aspm */
        nv_program_aspm(adev);
        /* setup nbio registers */
        adev->nbio.funcs->init_registers(adev);
        /* remap HDP registers to a hole in mmio space,
         * in order to expose those registers to process space
         */
        if (adev->nbio.funcs->remap_hdp_registers)
                adev->nbio.funcs->remap_hdp_registers(adev);
        /* enable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, true);

        return 0;
}

static int nv_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, false);

        return 0;
}

static int nv_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
        return true;
}

static int nv_common_wait_for_idle(void *handle)
{
        return 0;
}

static int nv_common_soft_reset(void *handle)
{
        return 0;
}
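
/*
 * HDP memory power gating.  The sequence below first forces the IPH and
 * RC memory clocks on, then clears every power-gating enable (HDP 5.0
 * cannot switch power modes on the fly), programs exactly one of the
 * LS/DS/SD modes according to the supported cg_flags, and finally
 * restores the original clock overrides.
 */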
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
                                           bool enable)
{
        uint32_t hdp_clk_cntl, hdp_clk_cntl1;
        uint32_t hdp_mem_pwr_cntl;

        if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_HDP_DS |
                                AMD_CG_SUPPORT_HDP_SD)))
                return;

        hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
        hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

        /* Before doing the clock/power mode switch,
         * force on the IPH & RC clocks */
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     RC_MEM_CLK_SOFT_OVERRIDE, 1);
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

        /* HDP 5.0 doesn't support dynamic power mode switching;
         * disable clock and power gating before making any change */
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_SD_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_SD_EN, 0);
        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* only one clock gating mode (LS/DS/SD) can be enabled */
        if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_LS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_LS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_DS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_SD_EN, enable);
                /* RC should not use shutdown mode, fall back to DS */
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        }

        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* restore the IPH & RC clock override after the clock/power mode change */
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
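
/*
 * HDP medium-grain clock gating is controlled through the soft-override
 * bits in HDP_CLK_CNTL: clearing them lets the clocks gate dynamically,
 * setting them forces the clocks on.
 */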
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
                                       bool enable)
{
        uint32_t hdp_clk_cntl;

        if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
                return;

        hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

        if (enable) {
                hdp_clk_cntl &=
                        ~(uint32_t)
                        (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                         HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
        } else {
                hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
        }

        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                nv_update_hdp_mem_power_gating(adev,
                                state == AMD_CG_STATE_GATE);
                nv_update_hdp_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }
        return 0;
}

static int nv_common_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        /* TODO */
        return 0;
}
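
/*
 * Report which clockgating features are currently active: NBIO state is
 * collected through its callback, HDP MGCG is inferred from the
 * soft-override bits in HDP_CLK_CNTL, and the HDP LS/DS/SD mode is read
 * back from HDP_MEM_POWER_CTRL.
 */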
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        uint32_t tmp;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        adev->nbio.funcs->get_clockgating_state(adev, flags);

        /* AMD_CG_SUPPORT_HDP_MGCG */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
        if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
                *flags |= AMD_CG_SUPPORT_HDP_MGCG;

        /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
        if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_LS;
        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_DS;
        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_SD;

        return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
        .name = "nv_common",
        .early_init = nv_common_early_init,
        .late_init = nv_common_late_init,
        .sw_init = nv_common_sw_init,
        .sw_fini = nv_common_sw_fini,
        .hw_init = nv_common_hw_init,
        .hw_fini = nv_common_hw_fini,
        .suspend = nv_common_suspend,
        .resume = nv_common_resume,
        .is_idle = nv_common_is_idle,
        .wait_for_idle = nv_common_wait_for_idle,
        .soft_reset = nv_common_soft_reset,
        .set_clockgating_state = nv_common_set_clockgating_state,
        .set_powergating_state = nv_common_set_powergating_state,
        .get_clockgating_state = nv_common_get_clockgating_state,
};