/* $NetBSD: kfd_device_queue_manager_cik.c,v 1.3 2021/12/18 23:44:59 riastradh Exp $ */

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kfd_device_queue_manager_cik.c,v 1.3 2021/12/18 23:44:59 riastradh Exp $");

#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
#include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"

static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size);
static int update_qpd_cik(struct device_queue_manager *dqm,
			  struct qcm_process_device *qpd);
static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
				 struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
			 struct qcm_process_device *qpd);
static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);

void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
	asic_ops->update_qpd = update_qpd_cik;
	asic_ops->init_sdma_vm = init_sdma_vm;
	asic_ops->mqd_manager_init = mqd_manager_init_cik;
}

void device_queue_manager_init_cik_hawaii(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
	asic_ops->update_qpd = update_qpd_cik_hawaii;
	asic_ops->init_sdma_vm = init_sdma_vm_hawaii;
	asic_ops->mqd_manager_init = mqd_manager_init_cik_hawaii;
}

static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with the low-bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */
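	/* For example, top_address_nybble = 0x2 gives
	 * PRIVATE_BASE(0x2000) | SHARED_BASE(0x2000): the nybble lands
	 * in the top four bits of each 16-bit base field, which the
	 * hardware extends to address bits 63:48, so the LDS aperture
	 * starts at 0x20000000'00000000 (X = 2 in the pattern above).
	 */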

	WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	return PRIVATE_BASE(top_address_nybble << 12) |
		SHARED_BASE(top_address_nybble << 12);
}

static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
			| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
			| DEFAULT_MTYPE(default_mtype)
			| APE1_MTYPE(ape1_mtype);

	return true;
}

static int update_qpd_cik(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
			DEFAULT_MTYPE(MTYPE_NONCACHED) |
			APE1_MTYPE(MTYPE_NONCACHED);
		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	if (qpd->pqm->process->is_32bit_user_mode) {
		temp = get_sh_mem_bases_32(pdd);
		qpd->sh_mem_bases = SHARED_BASE(temp);
		qpd->sh_mem_config |= PTR32;
	} else {
		temp = get_sh_mem_bases_nybble_64(pdd);
		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
		qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
	}

	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);

	return 0;
}
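
/*
 * Hawaii (dGPU) variant of update_qpd_cik: the process is always in
 * GPUVM64 addressing mode, so there is no 32-bit aperture path and the
 * PRIVATE_ATC bit in SH_MEM_CONFIG is not set.
 */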
static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
			DEFAULT_MTYPE(MTYPE_NONCACHED) |
			APE1_MTYPE(MTYPE_NONCACHED);
		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	temp = get_sh_mem_bases_nybble_64(pdd);
	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);

	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);

	return 0;
}

static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);

	if (q->process->is_32bit_user_mode)
		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
				get_sh_mem_bases_32(qpd_to_pdd(qpd));
	else
		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;

	q->properties.sdma_vm_addr = value;
}

static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
		struct queue *q,
		struct qcm_process_device *qpd)
{
	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	q->properties.sdma_vm_addr =
		((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
		 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
		SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}