/*	$NetBSD: gpu_scheduler.h,v 1.4 2021/12/19 12:23:16 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <drm/drm_wait_netbsd.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for
 *           termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler	**sched_list;
	unsigned int			num_sched_list;
	enum drm_sched_priority		priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
#ifdef __NetBSD__
	struct proc			*last_user;
#else
	struct task_struct		*last_user;
#endif
	bool				stopped;
	struct completion		entity_idle;
};

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};
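/*
 * Example: a minimal, illustrative sketch of how a driver might attach
 * an entity to a scheduler with drm_sched_entity_init() (declared
 * below).  The names "ring" and "ctx" are hypothetical driver-side
 * variables, not part of this API, and error handling is abbreviated:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&ctx->entity,
 *	    DRM_SCHED_PRIORITY_NORMAL, sched_list,
 *	    ARRAY_SIZE(sched_list), NULL);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_sched_entity_destroy(&ctx->entity);
 */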
/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
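/*
 * Example: because &drm_sched_fence.finished exists as soon as
 * drm_sched_job_init() succeeds, it is the fence a driver would hand
 * out (e.g. install in a syncobj or reservation object) before pushing
 * the job.  A hedged sketch, where "job" is a hypothetical driver job
 * embedding a struct drm_sched_job as member "base":
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (ret)
 *		return ret;
 *	out_fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 */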
/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the
 *         hang limit of the scheduler then the job is marked guilty and will
 *         not be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should then call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions must be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on. Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
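/*
 * Example: a skeletal driver-side implementation of the backend ops.
 * All foo_* names are hypothetical.  A real driver returns the next
 * unsignalled dependency fence (or NULL) from .dependency, kicks the
 * hardware and returns its completion fence from .run_job, resets the
 * hardware in .timedout_job, and releases the job in .free_job:
 *
 *	static struct dma_fence *
 *	foo_sched_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = container_of(sched_job,
 *		    struct foo_job, base);
 *
 *		return foo_hw_submit(job); // fence signalled on HW completion
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency = foo_sched_dependency,
 *		.run_job = foo_sched_run_job,
 *		.timedout_job = foo_sched_timedout_job,
 *		.free_job = foo_sched_free_job,
 *	};
 */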
/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will not be scheduled further.
 * @score: score to help the load balancer pick an idle sched.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	drm_waitqueue_t			wake_up_worker;
	drm_waitqueue_t			job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			score;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
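/*
 * Example: one scheduler is created per hardware ring, typically at
 * device initialization.  An illustrative sketch: "ring" and
 * foo_sched_ops are hypothetical, and the queue depth (64), hang
 * limit (2), and 5 s timeout are arbitrary values, not defaults:
 *
 *	ret = drm_sched_init(&ring->sched, &foo_sched_ops,
 *	    64, 2, msecs_to_jiffies(5000), ring->name);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_sched_fini(&ring->sched);
 */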
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif