Lines Matching defs:job
34 * backend operations to the scheduler, such as submitting a job to the hardware run queue,
35 * returning the dependencies of a job, etc.
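These backend operations are supplied by the driver through struct drm_sched_backend_ops. A minimal sketch of such an ops table follows; the my_drv_ names and the helpers my_drv_submit_to_ring() and to_my_drv_job() are hypothetical and not part of the scheduler API:

#include <drm/gpu_scheduler.h>
#include <linux/slab.h>

static struct dma_fence *my_drv_dependency(struct drm_sched_job *sched_job,
					   struct drm_sched_entity *s_entity)
{
	/* Return the next unsignaled fence this job still depends on, or NULL. */
	return NULL;
}

static struct dma_fence *my_drv_run_job(struct drm_sched_job *sched_job)
{
	/* Push the job to the hardware run queue and return its HW fence. */
	return my_drv_submit_to_ring(sched_job);	/* hypothetical helper */
}

static void my_drv_timedout_job(struct drm_sched_job *sched_job)
{
	/* Called when the job exceeded the scheduler timeout; see the
	 * recovery sketch further below. */
}

static void my_drv_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
	kfree(to_my_drv_job(sched_job));		/* hypothetical container_of() wrapper */
}

static const struct drm_sched_backend_ops my_drv_sched_ops = {
	.dependency   = my_drv_dependency,
	.run_job      = my_drv_run_job,
	.timedout_job = my_drv_timedout_job,
	.free_job     = my_drv_free_job,
};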
127 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
224 * drm_sched_suspend_timeout - Suspend scheduler job timeout
261 * drm_sched_resume_timeout - Resume scheduler job timeout
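A driver that has to pause the job watchdog around its own work (for example while servicing a fault) can pair these two calls. A sketch, assuming the suspend helper returns the remaining timeout that is later handed back to the resume helper, and a hypothetical my_drv structure embedding the scheduler:

	unsigned long remaining;

	remaining = drm_sched_suspend_timeout(&my_drv->sched);
	my_drv_handle_fault(my_drv);			/* hypothetical driver work */
	drm_sched_resume_timeout(&my_drv->sched, remaining);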
299 struct drm_sched_job *job;
306 job = list_first_entry_or_null(&sched->ring_mirror_list,
309 if (job) {
311 * Remove the bad job so it cannot be freed by concurrent
315 list_del_init(&job->node);
318 job->sched->ops->timedout_job(job);
321 * Guilty job did complete and hence needs to be manually removed
325 job->sched->ops->free_job(job);
340 * @bad: The job guilty of time out
342 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
386 * @bad: job which caused the time out
389 * Note: bad job will not be freed as it might be used later and so it's
402 * Reinsert the bad job here - now it's safe as
404 * bad job at this point - we parked (waited for) any in progress
411 * job extracted.
416 * Iterate the job list from later to earlier one and either deactivate
428 * remove job from ring_mirror_list.
436 * Wait for job's HW fence callback to finish using s_job
439 * Job is still alive so fence refcount at least 1
444 * We must keep bad job alive for later use during
446 * that the guilty job must be released.
515 * drm_sched_resubmit_jobs - helper to relaunch jobs from the ring mirror list
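Taken together, a driver's timedout_job callback typically drives the recovery sequence built from these helpers. A hedged sketch, with my_drv_reset_hw() standing in for the driver's actual reset:

static void my_drv_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	/* Park the scheduler thread and detach the done callbacks from the
	 * HW fences; the bad job is kept alive for the resubmit below. */
	drm_sched_stop(sched, sched_job);

	/* Account this hang against the guilty entity. */
	drm_sched_increase_karma(sched_job);

	my_drv_reset_hw();				/* hypothetical HW reset */

	/* Re-queue the unfinished jobs from the ring mirror list... */
	drm_sched_resubmit_jobs(sched);

	/* ...then unpark the scheduler and re-arm the timeout. */
	drm_sched_start(sched, true);
}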
556 * drm_sched_job_init - init a scheduler job
558 * @job: scheduler job to init
560 * @owner: job owner for debugging
567 int drm_sched_job_init(struct drm_sched_job *job,
579 job->sched = sched;
580 job->entity = entity;
581 job->s_priority = entity->rq - sched->sched_rq;
582 job->s_fence = drm_sched_fence_create(entity, owner);
583 if (!job->s_fence)
585 job->id = atomic64_inc_return(&sched->job_id_count);
587 INIT_LIST_HEAD(&job->node);
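On the submission side, a driver initializes the job and then pushes it to its entity; run_job() is invoked later, once the job's dependencies have signaled. A sketch, assuming a hypothetical struct my_drv_job that embeds struct drm_sched_job as its base member, with entity and owner coming from the surrounding driver context:

	struct my_drv_job *mjob;
	int ret;

	mjob = kzalloc(sizeof(*mjob), GFP_KERNEL);
	if (!mjob)
		return -ENOMEM;

	ret = drm_sched_job_init(&mjob->base, entity, owner);
	if (ret) {
		kfree(mjob);
		return ret;
	}

	/* Hand the job to the scheduler; ops->run_job() is called later. */
	drm_sched_entity_push_job(&mjob->base, entity);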
594 * drm_sched_job_cleanup - clean up scheduler job resources
596 * @job: scheduler job to clean up
598 void drm_sched_job_cleanup(struct drm_sched_job *job)
600 dma_fence_put(&job->s_fence->finished);
601 job->s_fence = NULL;
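Drivers normally call drm_sched_job_cleanup() from their free_job callback (as in the ops sketch above), but it is also what an error path uses when a job was initialized and then abandoned before being pushed. A sketch of such an error path, sitting between drm_sched_job_init() and drm_sched_entity_push_job() in the hypothetical submit path above:

	ret = my_drv_prepare_job(mjob);			/* hypothetical setup step */
	if (ret) {
		drm_sched_job_cleanup(&mjob->base);
		kfree(mjob);
		return ret;
	}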
659 * drm_sched_process_job - process a job
664 * Called after job has finished execution.
684 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
688 * Returns the next finished job from the mirror list (if there is one)
694 struct drm_sched_job *job;
707 job = list_first_entry_or_null(&sched->ring_mirror_list,
710 if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
711 /* remove job from ring_mirror_list */
712 list_del_init(&job->node);
714 job = NULL;
715 /* queue timeout for next job */
719 return job;
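The scheduler thread is then expected to free whatever this returns and re-arm the timeout. Roughly, the caller's side looks like the following sketch (not a verbatim quote of the kernel code):

	struct drm_sched_job *cleanup_job;

	cleanup_job = drm_sched_get_cleanup_job(sched);
	if (cleanup_job) {
		/* free_job() runs in scheduler-thread context */
		sched->ops->free_job(cleanup_job);
		/* queue timeout for next job */
		drm_sched_start_timeout(sched);
	}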
778 /* queue timeout for next job */
831 * @hang_limit: number of times to allow a job to hang before dropping it
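The hang limit is handed to the scheduler at init time. A sketch of bringing up a scheduler instance with the hypothetical ops table from above; the exact drm_sched_init() parameter list differs between kernel versions, and this follows the variant the listing corresponds to:

	ret = drm_sched_init(&my_drv->sched, &my_drv_sched_ops,
			     64,			/* hw_submission: ring capacity */
			     2,				/* hang_limit */
			     msecs_to_jiffies(500),	/* timeout */
			     "my_drv");
	if (ret)
		return ret;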