/*	$NetBSD: amdgpu_job.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_job.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

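/*
 * Scheduler timeout handler: called by the DRM scheduler when a job has
 * been running longer than its timeout.  First try a lightweight soft
 * recovery of the ring; if that fails, log the stuck sequence numbers
 * and the offending process, then either kick off a full GPU recovery
 * or suspend the scheduler timeout.
 */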
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		return;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
	else
		drm_sched_suspend_timeout(&ring->sched);
}

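/*
 * Allocate a job together with room for @num_ibs IBs in a single
 * allocation; the IB array starts directly behind the job structure.
 */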
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

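/*
 * Convenience wrapper: allocate a job with a single IB of @size bytes,
 * freeing the job again if the IB cannot be allocated.
 */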
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

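/*
 * Free the IBs attached to a job.  The fence handed to amdgpu_ib_free()
 * keeps the IB memory around until the job has actually completed on
 * the hardware.
 */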
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

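/*
 * Scheduler free_job callback: tear down a job that went through the
 * scheduler, dropping the ring priority reference taken in
 * amdgpu_job_submit().
 */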
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_ring_priority_put(ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

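/*
 * Free a job directly, without going through the scheduler's free_job
 * callback; used for direct submissions and error paths.
 */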
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

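/*
 * Hand a job over to the scheduler through @entity.  On success the
 * caller gets a reference to the job's finished fence in @f, and the
 * scheduler owns the job from here on.
 */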
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	return 0;
}

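/*
 * Bypass the scheduler and schedule the job's IBs on @ring immediately;
 * the job is freed right away on success.
 */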
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	job->fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

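/*
 * Scheduler dependency callback: return the next fence the job still
 * has to wait for before it can run.  Explicit dependencies that the
 * scheduler already optimized away are recorded in sched_sync, and a
 * VM ID is grabbed once all other dependencies have been signaled.
 */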
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	bool explicit = false;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
	if (fence && explicit) {
		if (drm_sched_dependency_optimized(fence, s_entity)) {
			r = amdgpu_sync_fence(&job->sched_sync, fence, false);
			if (r)
				DRM_ERROR("Error adding fence (%d)\n", r);
		}
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync, NULL);
	}

	return fence;
}

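/*
 * Scheduler run_job callback: all dependencies are resolved, so push
 * the job's IBs to the ring.  If VRAM contents were lost since the job
 * was created, or an error was already recorded on the finished fence,
 * the IBs are skipped and the job completes with an error instead.
 */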
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

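/*
 * Force-complete all jobs on @sched: pop every job still sitting in the
 * scheduler's run queues and signal its fences with -EHWPOISON, then do
 * the same for jobs already pushed to the hardware, so that nothing
 * waits forever on jobs that can no longer complete.
 */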
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};
    298