/*	$NetBSD: amdgpu_ctx.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu (at) amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ctx.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $");

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#include <linux/nbsd-namespace.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

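/*
 * Check whether the calling file may create a context at the requested
 * scheduler priority: NORMAL and below are open to everyone, anything
 * higher requires CAP_SYS_NICE or DRM master status.
 */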
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

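/*
 * Lazily allocate the context entity for one ring of a hardware IP, pick the
 * scheduler(s) backing that IP and initialize the DRM scheduler entity with
 * the context's effective priority.
 */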
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
				ctx->init_priority : ctx->override_priority;
	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		sched = &adev->gfx.gfx_ring[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		scheds = adev->gfx.compute_sched;
		num_scheds = adev->gfx.num_compute_sched;
		break;
	case AMDGPU_HW_IP_DMA:
		scheds = adev->sdma.sdma_sched;
		num_scheds = adev->sdma.num_sdma_sched;
		break;
	case AMDGPU_HW_IP_UVD:
		sched = &adev->uvd.inst[0].ring.sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_VCE:
		sched = &adev->vce.ring[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		sched = &adev->uvd.inst[0].ring_enc[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		scheds = adev->vcn.vcn_dec_sched;
		num_scheds = adev->vcn.num_vcn_dec_sched;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		scheds = adev->vcn.vcn_enc_sched;
		num_scheds = adev->vcn.num_vcn_enc_sched;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		scheds = adev->jpeg.jpeg_sched;
		num_scheds = adev->jpeg.num_jpeg_sched;
		break;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);

	return r;
}

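/*
 * Initialize a newly allocated context: check that the caller may use the
 * requested priority, then record the current GPU reset and VRAM lost
 * counters so later queries can report resets that happen afterwards.
 */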
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

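/*
 * Free one context entity, dropping the references on any fences still
 * cached in its ring of recent submissions.
 */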
static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

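/* Final kref release: free all remaining entities and the context itself. */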
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

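/*
 * Translate a (hw_ip, instance, ring) triple from userspace into a scheduler
 * entity, validating the indices and creating the entity on first use.
 */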
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

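/*
 * Allocate and initialize a new context for a DRM file and publish it in the
 * per-file handle IDR; the new handle is returned through *id.
 */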
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	idr_preload(GFP_KERNEL);
	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		idr_preload_end();
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	idr_preload_end();

	return r;
}

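/*
 * kref release callback for userspace handles: destroy the scheduler
 * entities first, then tear down the context via amdgpu_ctx_fini().
 */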
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

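/* Remove a context handle from the per-file IDR and drop its reference. */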
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

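/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset has occurred since
 * the last query on this context.
 */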
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

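/*
 * AMDGPU_CTX_OP_QUERY_STATE2: report reset, VRAM lost, guilty and RAS error
 * status as flag bits, comparing the context's cached counters against the
 * device's current ones.
 */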
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the uncorrectable (UE) error count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counters are monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the correctable (CE) error count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

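/* DRM_IOCTL_AMDGPU_CTX: dispatch context alloc/free/query requests. */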
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

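/* Look up a context by handle and take a reference on it. */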
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

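/*
 * Remember the fence of a new submission in the entity's ring buffer of
 * recent fences and hand back its sequence number.  The slot being recycled
 * must already have signalled.
 */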
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

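/*
 * Look up the fence for a given sequence number on an entity.  Returns an
 * error pointer for sequence numbers from the future, NULL for ones too old
 * to still be tracked, and otherwise a new reference to the fence.
 */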
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

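/*
 * Apply a priority override to the context and propagate the effective
 * priority to every entity that has already been created.
 */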
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			struct drm_sched_entity *entity;

			if (!ctx->entities[i][j])
				continue;

			entity = &ctx->entities[i][j]->entity;
			drm_sched_entity_set_priority(entity, ctx_prio);
		}
	}
}

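/*
 * Before a new submission reuses a fence slot, wait for the fence that
 * previously occupied it, so a context cannot run more than
 * amdgpu_sched_jobs submissions ahead of the hardware.
 */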
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

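/*
 * Flush all entities of all contexts owned by this manager, giving queued
 * jobs up to 'timeout' to drain; returns the remaining timeout.
 */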
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

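/*
 * Finalize the entities of all contexts that are no longer referenced;
 * contexts still holding extra references are reported and skipped.
 */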
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

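/* Tear down the context manager, releasing any remaining contexts. */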
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

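/*
 * Build the per-IP scheduler lists used by amdgpu_ctx_init_entity() from the
 * rings set up at hardware init, skipping harvested VCN/JPEG instances.
 */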
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
		adev->gfx.num_gfx_sched++;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
		adev->gfx.num_compute_sched++;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
		adev->sdma.num_sdma_sched++;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
			&adev->vcn.inst[i].ring_dec.sched;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
				&adev->vcn.inst[i].ring_enc[j].sched;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
			&adev->jpeg.inst[i].ring_dec.sched;
	}
}