/*	$NetBSD: sched_entity.c,v 1.1.1.1 2021/12/18 20:15:53 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_entity.c,v 1.1.1.1 2021/12/18 20:15:53 riastradh Exp $");

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler
 * when submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list must have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
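
/*
 * Example (illustrative sketch): a driver typically embeds one
 * drm_sched_entity per context and initializes it against the scheduler(s)
 * of the ring(s) it may submit to.  The names my_ctx, my_dev and ring_sched
 * are hypothetical.
 *
 *	struct drm_gpu_scheduler *scheds[] = { &my_dev->ring_sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity,
 *				  DRM_SCHED_PRIORITY_NORMAL,
 *				  scheds, ARRAY_SIZE(scheds),
 *				  &my_ctx->guilty);
 *	if (r)
 *		return r;
 */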

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_get_free_sched - Get the rq with the least load from the entity's sched_list
 *
 * @entity: scheduler entity
 *
 * Return the pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_score = UINT_MAX, num_score;
	int i;

	for (i = 0; i < entity->num_sched_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->sched_list[i];

		if (!entity->sched_list[i]->ready) {
			DRM_WARN("sched%s is not ready, skipping", sched->name);
			continue;
		}

		num_score = atomic_read(&sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			rq = &entity->sched_list[i]->sched_rq[entity->priority];
		}
	}

	return rq;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * The entity teardown is split into two functions; this first one does the
 * waiting, removes the entity from the runqueue and returns an error when
 * the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For killed process disable any more IBs enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even have a chance to submit its first job to
		 * the HW, and so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for thread to idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);

		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
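
/*
 * Example (illustrative sketch): on context teardown a driver can either
 * call drm_sched_entity_destroy() directly, or perform the same split by
 * hand when it wants its own flush timeout.  my_ctx and the timeout value
 * are hypothetical.
 *
 *	long timeout = msecs_to_jiffies(1000);
 *
 *	drm_sched_entity_flush(&my_ctx->entity, timeout);
 *	drm_sched_entity_fini(&my_ctx->entity);
 */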

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, so we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from entity that marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
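
/*
 * Note (illustrative, based on the scheduler's main loop): the scheduler
 * thread is the single consumer of this spsc queue.  It roughly does
 *
 *	sched_job = drm_sched_entity_pop_job(entity);
 *	complete(&entity->entity_idle);
 *
 * before handing the job to the hardware, which is why
 * drm_sched_entity_fini() waits on entity_idle before killing the
 * remaining jobs.
 */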

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	rq = drm_sched_entity_get_free_sched(entity);
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called together
 * with drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
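
/*
 * Example (illustrative sketch): submission from a driver pairs
 * drm_sched_job_init() with drm_sched_entity_push_job() under a common
 * lock, so that fence sequence numbers match queue order.  my_job,
 * my_ctx and submit_lock are hypothetical.
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	r = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_ctx);
 *	if (!r)
 *		drm_sched_entity_push_job(&my_job->base, &my_ctx->entity);
 *	mutex_unlock(&my_ctx->submit_lock);
 */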