/*	$NetBSD: gpu_scheduler.h,v 1.1.1.1 2021/12/18 20:15:57 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from the rq and destined for
 *           termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler	**sched_list;
	unsigned int			num_sched_list;
	enum drm_sched_priority		priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
	struct completion		entity_idle;
};

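/*
 * Example: a minimal, illustrative sketch of creating an entity that feeds
 * a single scheduler.  The names "ring_sched" and "entity" are hypothetical
 * driver-side variables, not part of this API.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring_sched };
 *	struct drm_sched_entity entity;
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (ret)
 *		return ret;
 *
 * drm_sched_entity_init() is declared at the bottom of this header; passing
 * NULL for @guilty opts out of guilty-context tracking.
 */
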
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this
	 * struct belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging.
	 */
	void				*owner;
};

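/*
 * Example: following the advice in the @finished documentation above, a
 * driver can take a reference on the finished fence right after
 * drm_sched_job_init() and hand it out as the job's out fence.  A sketch;
 * "job" and "out_fence" are hypothetical driver-side names.
 *
 *	struct dma_fence *out_fence;
 *
 *	ret = drm_sched_job_init(job, entity, owner);
 *	if (ret)
 *		return ret;
 *	out_fence = dma_fence_get(&job->s_fence->finished);
 */
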
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should then call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};

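/*
 * Example: the usual driver pattern is to embed struct drm_sched_job as the
 * base of a driver-private job structure and push it through an entity.  An
 * illustrative sketch; "my_job", its "base" member and "file_priv" are
 * hypothetical.
 *
 *	struct my_job {
 *		struct drm_sched_job	base;
 *		// driver-private payload describing the command submission
 *	};
 *
 *	ret = drm_sched_job_init(&job->base, &entity, file_priv);
 *	if (ret)
 *		return ret;
 *	drm_sched_entity_push_job(&job->base, &entity);
 */
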
/**
 * drm_sched_invalidate_job - bump the job's karma and test it
 *
 * Increments @s_job's karma and returns true if it now exceeds @threshold,
 * i.e. the job should be treated as guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops
 *
 * Defines the backend operations called by the scheduler; these
 * functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};

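/*
 * Example: a skeletal ops table as a driver might provide one.  All "my_*"
 * names are hypothetical; each hook would be filled in with real
 * ring-specific logic.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		// hand the job to the hardware ring and return the
 *		// hardware fence that signals on completion
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_dependency,
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */
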
/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will not be considered for
 *              scheduling further.
 * @score: score to help the load balancer pick an idle scheduler.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			score;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);

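/*
 * Example: a hedged sketch of bringing up one scheduler per ring at driver
 * init time.  "my_sched_ops", "ring->sched" and the parameter values are
 * hypothetical and driver-specific.
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,			// hw_submission
 *			     3,				// hang_limit
 *			     msecs_to_jiffies(10000),	// timeout
 *			     "gfx");
 *	if (ret)
 *		return ret;
 */
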
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif