/*	$NetBSD: sched_entity.c,v 1.7 2021/12/24 15:26:35 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_entity.c,v 1.7 2021/12/24 15:26:35 riastradh Exp $");

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list must have at least one element to schedule
 *       the entity.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
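
/*
 * Example usage (illustrative sketch only; the `ring' pointer, its embedded
 * scheduler, and the surrounding error handling are assumptions, not part
 * of this file):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	struct drm_sched_entity entity;
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *	    sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (ret)	// -EINVAL on bad arguments
 *		return ret;
 *
 * Passing NULL for @guilty is acceptable for entities that do not
 * participate in guilty-job tracking.
 */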
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	assert_spin_locked(&entity->rq->sched->job_list_lock);

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_get_free_sched - Get the rq from sched_list with least load
 *
 * @entity: scheduler entity
 *
 * Returns the pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_score = UINT_MAX, num_score;
	int i;

	for (i = 0; i < entity->num_sched_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->sched_list[i];

		if (!entity->sched_list[i]->ready) {
			DRM_WARN("sched%s is not ready, skipping\n", sched->name);
			continue;
		}

		num_score = atomic_read(&sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			rq = &entity->sched_list[i]->sched_rq[entity->priority];
		}
	}

	return rq;
}
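
/*
 * Worked example (hypothetical numbers): with sched_list = { sched0, sched1 }
 * and score counts of 3 and 1 respectively, the loop above skips any
 * scheduler whose ->ready is false and returns
 * &sched1->sched_rq[entity->priority], since 1 is the least load seen.
 */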
/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * Splits drm_sched_entity_fini() into two functions: this first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
#ifdef __NetBSD__
	struct proc *last_user;
#else
	struct task_struct *last_user;
#endif
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
#ifdef __NetBSD__
	spin_lock(&sched->job_list_lock);
	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &sched->job_scheduled,
	    &sched->job_list_lock,
	    drm_sched_entity_is_idle(entity));
	spin_unlock(&sched->job_list_lock);
#else
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}
#endif

	/* For a killed process, disable any further IB enqueue right now. */
#ifdef __NetBSD__
	last_user = cmpxchg(&entity->last_user, curproc, NULL);
	if ((!last_user || last_user == curproc) &&
	    (curproc->p_sflag & PS_WEXIT))
#else
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
#endif
	{
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
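
/*
 * Teardown sketch (assumed driver-side usage, not taken from this file):
 * the flush/fini split lets a driver first drain the entity and then
 * destroy it, either separately as below or in one call via
 * drm_sched_entity_destroy():
 *
 *	// wait for queued jobs, bounded by the scheduler's default timeout
 *	drm_sched_entity_flush(&entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 *	drm_sched_entity_fini(&entity);	// kill anything still queued
 */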
/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipeline is hung by an older entity, a new entity
		 * may not even have had a chance to submit its first job to
		 * the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	spin_lock_destroy(&entity->rq_lock);

	/*
	 * Consumption of existing IBs wasn't completed. Forcefully remove
	 * them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);

		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	destroy_completion(&entity->entity_idle);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini().
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	spin_lock(&entity->rq->sched->job_list_lock);
	drm_sched_wakeup(entity->rq->sched);
	spin_unlock(&entity->rq->sched->job_list_lock);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job which
		 * belongs to the same entity; we can ignore fences from
		 * ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler; only need to wait for it
		 * to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled. */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
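
/*
 * For reference, a minimal sketch of the driver-side ->dependency hook whose
 * fences are consumed by drm_sched_entity_pop_job() below (the names
 * my_job_next_dependency() and to_my_job() are hypothetical):
 *
 *	static struct dma_fence *
 *	my_sched_dependency(struct drm_sched_job *sched_job,
 *	    struct drm_sched_entity *entity)
 *	{
 *		// return the next unsignalled fence this job must wait
 *		// for, or NULL once the job is ready to run
 *		return my_job_next_dependency(to_my_job(sched_job));
 *	}
 */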
/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that is marked guilty. */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	rq = drm_sched_entity_get_free_sched(entity);
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}
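
/*
 * Timing note (sketch): migration can only happen between jobs.  With jobs
 * still queued, or with entity->last_scheduled not yet signalled, the
 * function above returns without touching entity->rq; only an idle entity
 * is handed to drm_sched_entity_get_free_sched() for rebalancing.
 */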
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * jobs' fence sequence numbers, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
#ifdef __NetBSD__
	WRITE_ONCE(entity->last_user, curproc);
#else
	WRITE_ONCE(entity->last_user, current->group_leader);
#endif
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* The first job wakes up the scheduler. */
	if (first) {
		/* Add the entity to the run queue. */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		spin_lock(&entity->rq->sched->job_list_lock);
		drm_sched_wakeup(entity->rq->sched);
		spin_unlock(&entity->rq->sched->job_list_lock);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
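
/*
 * Submission sketch (assumed driver-side usage; `my_job', its embedded
 * `base' job and `ctx_lock' are hypothetical): to keep the queue order in
 * step with the fence sequence numbers, pair drm_sched_job_init() and
 * drm_sched_entity_push_job() under one lock:
 *
 *	mutex_lock(&ctx_lock);
 *	ret = drm_sched_job_init(&my_job->base, &entity, owner);
 *	if (ret == 0)
 *		drm_sched_entity_push_job(&my_job->base, &entity);
 *	mutex_unlock(&ctx_lock);
 */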