
Lines Matching defs:sched

54 #include <linux/sched.h>
56 #include <uapi/linux/sched/types.h>
77 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
83 rq->sched = sched;
100 atomic_inc(&rq->sched->score);
119 atomic_dec(&rq->sched->score);
181 struct drm_gpu_scheduler *sched = entity->rq->sched;
189 if (s_fence && s_fence->sched == sched)
199 * @sched: scheduler instance to start the worker for
203 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
205 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
206 !list_empty(&sched->ring_mirror_list))
207 schedule_delayed_work(&sched->work_tdr, sched->timeout);
213 * @sched: scheduler where the timeout handling should be started.
217 void drm_sched_fault(struct drm_gpu_scheduler *sched)
219 mod_delayed_work(system_wq, &sched->work_tdr, 0);
226 * @sched: scheduler instance for which to suspend the timeout
236 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
240 cancel_delayed_work(&sched->work_tdr);
245 sched_timeout = sched->work_tdr.timer.expires;
251 if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
255 return sched->timeout;
263 * @sched: scheduler instance for which to resume the timeout
269 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
274 spin_lock_irqsave(&sched->job_list_lock, flags);
276 if (list_empty(&sched->ring_mirror_list))
277 cancel_delayed_work(&sched->work_tdr);
279 mod_delayed_work(system_wq, &sched->work_tdr, remaining);
281 spin_unlock_irqrestore(&sched->job_list_lock, flags);
287 struct drm_gpu_scheduler *sched = s_job->sched;
290 spin_lock_irqsave(&sched->job_list_lock, flags);
291 list_add_tail(&s_job->node, &sched->ring_mirror_list);
292 drm_sched_start_timeout(sched);
293 spin_unlock_irqrestore(&sched->job_list_lock, flags);
298 struct drm_gpu_scheduler *sched;
302 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
305 spin_lock_irqsave(&sched->job_list_lock, flags);
306 job = list_first_entry_or_null(&sched->ring_mirror_list,
312 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
316 spin_unlock_irqrestore(&sched->job_list_lock, flags);
318 job->sched->ops->timedout_job(job);
324 if (sched->free_guilty) {
325 job->sched->ops->free_job(job);
326 sched->free_guilty = false;
329 spin_unlock_irqrestore(&sched->job_list_lock, flags);
332 spin_lock_irqsave(&sched->job_list_lock, flags);
333 drm_sched_start_timeout(sched);
334 spin_unlock_irqrestore(&sched->job_list_lock, flags);
343 * limit of the scheduler then the respective sched entity is marked guilty and
351 struct drm_gpu_scheduler *sched = bad->sched;
361 struct drm_sched_rq *rq = &sched->sched_rq[i];
368 bad->sched->hang_limit)
385 * @sched: scheduler instance
394 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
399 kthread_park(sched->thread);
408 if (bad && bad->sched == sched)
413 list_add(&bad->node, &sched->ring_mirror_list);
419 * This iteration is thread safe as sched thread is stopped.
421 list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
425 atomic_dec(&sched->hw_rq_count);
431 spin_lock_irqsave(&sched->job_list_lock, flags);
433 spin_unlock_irqrestore(&sched->job_list_lock, flags);
449 sched->ops->free_job(s_job);
451 sched->free_guilty = true;
461 cancel_delayed_work(&sched->work_tdr);
469 * @sched: scheduler instance
470 * @full_recovery: proceed with complete sched restart
473 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
480 * Locking the list is not required here as the sched thread is parked
484 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
487 atomic_inc(&sched->hw_rq_count);
505 spin_lock_irqsave(&sched->job_list_lock, flags);
506 drm_sched_start_timeout(sched);
507 spin_unlock_irqrestore(&sched->job_list_lock, flags);
510 kthread_unpark(sched->thread);
517 * @sched: scheduler instance
520 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
527 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
530 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
539 fence = sched->ops->run_job(s_job);
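A hedged sketch of the recovery sequence these helpers are built for, written as a hypothetical timedout_job callback; my_gpu_reset() is an assumption and not part of this file.

    static void my_job_timedout(struct drm_sched_job *bad_job)
    {
            struct drm_gpu_scheduler *sched = bad_job->sched;

            /* Park the scheduler thread and detach the done-fence callbacks. */
            drm_sched_stop(sched, bad_job);
            drm_sched_increase_karma(bad_job);

            my_gpu_reset(sched);            /* driver-specific hardware reset */

            /* Re-run the jobs left on the mirror list, then unpark the thread. */
            drm_sched_resubmit_jobs(sched);
            drm_sched_start(sched, true);
    }
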
571 sched;
577 sched = entity->rq->sched;
579 job->sched = sched;
581 job->s_priority = entity->rq - sched->sched_rq;
585 job->id = atomic64_inc_return(&sched->job_id_count);
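For context, a minimal sketch of the submit path that drives drm_sched_job_init(); the wrapper struct and the two-argument drm_sched_entity_push_job() of this kernel era are assumptions from outside this file.

    struct my_job {
            struct drm_sched_job base;      /* must embed the scheduler job */
            /* driver payload ... */
    };

    static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
                         void *owner)
    {
            int ret;

            /* Binds job->sched to entity->rq->sched and assigns job->id. */
            ret = drm_sched_job_init(&job->base, entity, owner);
            if (ret)
                    return ret;

            drm_sched_entity_push_job(&job->base, entity);
            return 0;
    }
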
608 * @sched: scheduler instance
612 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
614 return atomic_read(&sched->hw_rq_count) <
615 sched->hw_submission_limit;
621 * @sched: scheduler instance
624 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
626 assert_spin_locked(&sched->job_list_lock);
627 if (drm_sched_ready(sched))
628 DRM_SPIN_WAKEUP_ONE(&sched->wake_up_worker,
629 &sched->job_list_lock);
635 * @sched: scheduler instance
640 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
645 if (!drm_sched_ready(sched))
650 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
670 struct drm_gpu_scheduler *sched = s_fence->sched;
672 atomic_dec(&sched->hw_rq_count);
673 atomic_dec(&sched->score);
678 spin_lock(&sched->job_list_lock);
679 DRM_SPIN_WAKEUP_ONE(&sched->wake_up_worker, &sched->job_list_lock);
680 spin_unlock(&sched->job_list_lock);
686 * @sched: scheduler instance
692 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
696 assert_spin_locked(&sched->job_list_lock);
702 if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
703 !cancel_delayed_work(&sched->work_tdr)) ||
704 __kthread_should_park(sched->thread))
707 job = list_first_entry_or_null(&sched->ring_mirror_list,
716 drm_sched_start_timeout(sched);
725 * @sched: scheduler instance
729 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
749 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
754 /* Wait for sched->thread to be initialized by drm_sched_init. */
755 spin_lock(&sched->job_list_lock);
756 DRM_SPIN_WAIT_UNTIL(r, &sched->wake_up_worker, &sched->job_list_lock,
757 sched->thread != NULL);
758 spin_unlock(&sched->job_list_lock);
767 spin_lock(&sched->job_list_lock);
768 DRM_SPIN_WAIT_UNTIL(r, &sched->wake_up_worker,
769 &sched->job_list_lock,
770 ((cleanup_job = drm_sched_get_cleanup_job(sched)) ||
771 (!drm_sched_blocked(sched) &&
772 (entity = drm_sched_select_entity(sched))) ||
774 spin_unlock(&sched->job_list_lock);
777 sched->ops->free_job(cleanup_job);
779 drm_sched_start_timeout(sched);
794 atomic_inc(&sched->hw_rq_count);
797 fence = sched->ops->run_job(sched_job);
817 spin_lock(&sched->job_list_lock);
818 DRM_SPIN_WAKEUP_ONE(&sched->job_scheduled,
819 &sched->job_list_lock);
820 spin_unlock(&sched->job_list_lock);
828 * @sched: scheduler instance
837 int drm_sched_init(struct drm_gpu_scheduler *sched,
845 sched->ops = ops;
846 sched->hw_submission_limit = hw_submission;
847 sched->name = name;
848 sched->timeout = timeout;
849 sched->hang_limit = hang_limit;
851 drm_sched_rq_init(sched, &sched->sched_rq[i]);
853 DRM_INIT_WAITQUEUE(&sched->wake_up_worker, "drmschedw");
854 DRM_INIT_WAITQUEUE(&sched->job_scheduled, "drmschedj");
855 INIT_LIST_HEAD(&sched->ring_mirror_list);
856 spin_lock_init(&sched->job_list_lock);
857 atomic_set(&sched->hw_rq_count, 0);
858 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
859 atomic_set(&sched->score, 0);
860 atomic64_set(&sched->job_id_count, 0);
864 kthread_run(drm_sched_main, sched, sched->name,
865 &sched->job_list_lock, &sched->wake_up_worker);
868 sched->thread = NULL;
872 spin_lock(&sched->job_list_lock);
873 sched->thread = thread;
874 DRM_SPIN_WAKEUP_ALL(&sched->wake_up_worker, &sched->job_list_lock);
875 spin_unlock(&sched->job_list_lock);
877 sched->ready = true;
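Driver-side bring-up would look roughly like the sketch below; the callback names and the ring/timeout variables are assumptions, only the drm_sched_init() parameter order follows the assignments above.

    static const struct drm_sched_backend_ops my_sched_ops = {
            .dependency   = my_job_dependency,
            .run_job      = my_run_job,
            .timedout_job = my_job_timedout,
            .free_job     = my_free_job,
    };

    ret = drm_sched_init(&ring->sched, &my_sched_ops,
                         num_hw_submissions,               /* hw_submission */
                         hang_limit,                       /* hang_limit    */
                         msecs_to_jiffies(timeout_ms),     /* timeout       */
                         ring->name);                      /* name          */
    if (ret)
            return ret;
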
885 * @sched: scheduler instance
889 void drm_sched_fini(struct drm_gpu_scheduler *sched)
891 if (sched->thread)
892 kthread_stop(sched->thread);
894 sched->ready = false;
896 spin_lock_destroy(&sched->job_list_lock);
897 DRM_DESTROY_WAITQUEUE(&sched->job_scheduled);
898 DRM_DESTROY_WAITQUEUE(&sched->wake_up_worker);
901 struct drm_sched_rq *rq = &sched->sched_rq[i];
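
Teardown sketch (ctx and ring belong to the hypothetical driver above): entities should be destroyed before the scheduler that serves them.

    drm_sched_entity_destroy(&ctx->entity);   /* flush and tear down each entity */
    drm_sched_fini(&ring->sched);             /* then stop the scheduler thread  */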