/* $NetBSD: kfd_device_queue_manager.h,v 1.3 2021/12/18 23:44:59 riastradh Exp $ */

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
#define KFD_DEVICE_QUEUE_MANAGER_H_

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"


#define VMID_NUM 16

struct device_process_node {
        struct qcm_process_device *qpd;
        struct list_head list;
};

/**
 * struct device_queue_manager_ops
 *
 * @create_queue: Queue creation routine.
 *
 * @destroy_queue: Queue destruction routine.
 *
 * @update_queue: Queue update routine.
 *
 * @execute_queues: Dispatches the queues list to the H/W.
 *
 * @register_process: This routine associates a specific process with the
 * device.
 *
 * @unregister_process: Destroys the association between a process and the
 * device.
 *
 * @initialize: Initializes the pipelines and memory module for that device.
 *
 * @start: Initializes the resources/modules the device needs for queue
 * execution. This function is called on device initialization and after the
 * system resumes from suspension.
 *
 * @stop: This routine stops execution of all the active queues running on the
 * H/W; it is called on system suspend.
 *
 * @uninitialize: Destroys all the device queue manager resources allocated in
 * the initialize routine.
 *
 * @create_kernel_queue: Creates a kernel queue. Used for the debug queue.
 *
 * @destroy_kernel_queue: Destroys a kernel queue. Used for the debug queue.
 *
 * @set_cache_memory_policy: Sets the memory policy (cached/non-cached) for
 * the memory apertures.
 *
 * @process_termination: Clears all process queues belonging to that device.
 *
 * @evict_process_queues: Evicts all active queues of a process.
 *
 * @restore_process_queues: Restores all evicted queues of a process.
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
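 *
 * @pre_reset: Notifies the device queue manager that a GPU reset is imminent,
 * so it can stop issuing work to the hanging H/W until the reset completes.
 *
 * @set_trap_handler: Sets the trap handler base address (tba_addr) and trap
 * memory address (tma_addr) used by a process's wavefronts.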
 */

struct device_queue_manager_ops {
        int     (*create_queue)(struct device_queue_manager *dqm,
                                struct queue *q,
                                struct qcm_process_device *qpd);

        int     (*destroy_queue)(struct device_queue_manager *dqm,
                                struct qcm_process_device *qpd,
                                struct queue *q);

        int     (*update_queue)(struct device_queue_manager *dqm,
                                struct queue *q);

        int     (*register_process)(struct device_queue_manager *dqm,
                                    struct qcm_process_device *qpd);

        int     (*unregister_process)(struct device_queue_manager *dqm,
                                      struct qcm_process_device *qpd);

        int     (*initialize)(struct device_queue_manager *dqm);
        int     (*start)(struct device_queue_manager *dqm);
        int     (*stop)(struct device_queue_manager *dqm);
        void    (*pre_reset)(struct device_queue_manager *dqm);
        void    (*uninitialize)(struct device_queue_manager *dqm);
        int     (*create_kernel_queue)(struct device_queue_manager *dqm,
                                       struct kernel_queue *kq,
                                       struct qcm_process_device *qpd);

        void    (*destroy_kernel_queue)(struct device_queue_manager *dqm,
                                        struct kernel_queue *kq,
                                        struct qcm_process_device *qpd);

        bool    (*set_cache_memory_policy)(struct device_queue_manager *dqm,
                                           struct qcm_process_device *qpd,
                                           enum cache_policy default_policy,
                                           enum cache_policy alternate_policy,
                                           void __user *alternate_aperture_base,
                                           uint64_t alternate_aperture_size);

        int     (*set_trap_handler)(struct device_queue_manager *dqm,
                                    struct qcm_process_device *qpd,
                                    uint64_t tba_addr,
                                    uint64_t tma_addr);

        int     (*process_termination)(struct device_queue_manager *dqm,
                                       struct qcm_process_device *qpd);

        int     (*evict_process_queues)(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd);
        int     (*restore_process_queues)(struct device_queue_manager *dqm,
                                          struct qcm_process_device *qpd);

        int     (*get_wave_state)(struct device_queue_manager *dqm,
                                  struct queue *q,
                                  void __user *ctl_stack,
                                  u32 *ctl_stack_used_size,
                                  u32 *save_area_used_size);
};

struct device_queue_manager_asic_ops {
        int     (*update_qpd)(struct device_queue_manager *dqm,
                              struct qcm_process_device *qpd);
        bool    (*set_cache_memory_policy)(struct device_queue_manager *dqm,
                                           struct qcm_process_device *qpd,
                                           enum cache_policy default_policy,
                                           enum cache_policy alternate_policy,
                                           void __user *alternate_aperture_base,
                                           uint64_t alternate_aperture_size);
        void    (*init_sdma_vm)(struct device_queue_manager *dqm,
                                struct queue *q,
                                struct qcm_process_device *qpd);
        struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type,
                                                 struct kfd_dev *dev);
};

/**
 * struct device_queue_manager
 *
 * This struct is a base class for the kfd queue scheduler at the device
 * level. The device base class should expose the basic operations for queue
 * creation and queue destruction. This base class hides the scheduling mode
 * of the driver and the specific implementation of the concrete device. This
 * class is the only class in the queue scheduler that configures the H/W.
 *
 */

struct device_queue_manager {
        struct device_queue_manager_ops ops;
        struct device_queue_manager_asic_ops asic_ops;

        struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
        struct packet_manager packets;
        struct kfd_dev *dev;
        struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
        struct list_head queues;
        unsigned int saved_flags;
        unsigned int processes_count;
        unsigned int queue_count;
        unsigned int sdma_queue_count;
        unsigned int xgmi_sdma_queue_count;
        unsigned int total_queue_count;
        unsigned int next_pipe_to_allocate;
        unsigned int *allocated_queues;
        uint64_t sdma_bitmap;
        uint64_t xgmi_sdma_bitmap;
        /* the pasid mapping for each kfd vmid */
        uint16_t vmid_pasid[VMID_NUM];
        uint64_t pipelines_addr;
        uint64_t fence_gpu_addr;
        unsigned int *fence_addr;
        struct kfd_mem_obj *fence_mem;
        bool active_runlist;
        int sched_policy;

        /* hw exception */
        bool is_hws_hang;
        bool is_resetting;
        struct work_struct hw_exception_work;
        struct kfd_mem_obj hiq_sdma_mqd;
        bool sched_running;
};

void device_queue_manager_init_cik(
                struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_cik_hawaii(
                struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
                struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi_tonga(
                struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
                struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10_navi10(
                struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
                struct qcm_process_device *qpd);
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);

static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
        return (pdd->lds_base >> 16) & 0xFF;
}

static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
        return (pdd->lds_base >> 60) & 0x0E;
}

/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
        mutex_lock(&dqm->lock_hidden);
        dqm->saved_flags = memalloc_nofs_save();
}
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
        memalloc_nofs_restore(dqm->saved_flags);
        mutex_unlock(&dqm->lock_hidden);
}

#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
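
/*
 * Illustrative usage sketch (not part of the driver): how a hypothetical
 * caller would bracket DQM state access with dqm_lock()/dqm_unlock().
 * Because dqm_lock() calls memalloc_nofs_save(), any memory allocation made
 * while the lock is held behaves as GFP_NOFS, which is what prevents the
 * reclaim-FS deadlock described in the comment above dqm_lock().
 *
 *	static void example_touch_dqm_state(struct device_queue_manager *dqm)
 *	{
 *		dqm_lock(dqm);
 *		// Allocations on this path must not recurse into FS reclaim;
 *		// memalloc_nofs_save() in dqm_lock() ensures that.
 *		dqm->total_queue_count++;
 *		dqm_unlock(dqm);
 *	}
 */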