/*	$NetBSD: amdgpu_ctx.c,v 1.1.1.2 2021/12/18 20:11:05 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ctx.c,v 1.1.1.2 2021/12/18 20:11:05 riastradh Exp $");

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

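/*
 * Check whether the calling process may create a context with the requested
 * scheduler priority: NORMAL and below are open to everyone, higher
 * priorities require CAP_SYS_NICE or current DRM master status.
 */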
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

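/*
 * Lazily allocate the context entity for the given hardware IP and ring and
 * attach it to the scheduler(s) that drive that IP's rings.
 */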
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
				ctx->init_priority : ctx->override_priority;
	switch (hw_ip) {
		case AMDGPU_HW_IP_GFX:
			sched = &adev->gfx.gfx_ring[0].sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			scheds = adev->gfx.compute_sched;
			num_scheds = adev->gfx.num_compute_sched;
			break;
		case AMDGPU_HW_IP_DMA:
			scheds = adev->sdma.sdma_sched;
			num_scheds = adev->sdma.num_sdma_sched;
			break;
		case AMDGPU_HW_IP_UVD:
			sched = &adev->uvd.inst[0].ring.sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			sched = &adev->vce.ring[0].sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			sched = &adev->uvd.inst[0].ring_enc[0].sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			scheds = adev->vcn.vcn_dec_sched;
			num_scheds = adev->vcn.num_vcn_dec_sched;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			scheds = adev->vcn.vcn_enc_sched;
			num_scheds = adev->vcn.num_vcn_enc_sched;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			scheds = adev->jpeg.jpeg_sched;
			num_scheds = adev->jpeg.num_jpeg_sched;
			break;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);

	return r;
}

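/*
 * Initialize a freshly allocated context: check the priority permission,
 * zero the structure and record the current GPU reset and VRAM-lost
 * counters so later queries can detect resets.
 */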
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

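/*
 * Final kref release: drop every per-entity fence and free the context
 * itself.  The scheduler entities are expected to have been torn down
 * already by amdgpu_ctx_do_release() or amdgpu_ctx_mgr_entity_fini().
 */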
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

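/*
 * Look up (and create on first use) the scheduler entity for the given
 * HW IP type, instance and ring, validating the arguments on the way.
 */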
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance, with multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

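/*
 * Allocate a new context for a file descriptor, reserve an IDR handle for
 * it under the manager lock and hand the handle back to userspace.
 */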
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

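/*
 * kref release callback for the normal destruction path: destroy every
 * scheduler entity that was created for the context, then free it.
 */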
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

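/*
 * Legacy AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset happened
 * since the last query by comparing the per-context reset counter with the
 * device-wide one.
 */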
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

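/*
 * AMDGPU_CTX_OP_QUERY_STATE2: report reset, VRAM-lost, guilty and RAS
 * error flags for the context, updating the cached RAS error counters.
 */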
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the uncorrectable (UE) error count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counters are monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the correctable (CE) error count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

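/*
 * DRM_IOCTL_AMDGPU_CTX entry point: dispatch context allocation, freeing
 * and state queries on behalf of userspace.
 */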
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

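/*
 * Remember the fence of a submitted job in the per-entity ring buffer and
 * return its sequence number.  The slot being recycled must already have
 * signaled (enforced with a BUG_ON).
 */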
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

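/*
 * Return a reference to the fence stored for sequence number @seq, NULL if
 * the fence has already been recycled out of the ring buffer, or
 * ERR_PTR(-EINVAL) if the sequence number was never emitted.  A @seq of
 * ~0ull means "the most recently added fence".
 */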
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

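/*
 * Apply a priority override to the context and propagate the effective
 * priority to every scheduler entity that has been created so far.
 */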
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			struct drm_sched_entity *entity;

			if (!ctx->entities[i][j])
				continue;

			entity = &ctx->entities[i][j]->entity;
			drm_sched_entity_set_priority(entity, ctx_prio);
		}
	}
}

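/*
 * Wait (interruptibly) for the fence occupying the ring-buffer slot that
 * the next submission to this entity will reuse.
 */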
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

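/*
 * Flush all scheduler entities of every context owned by this manager,
 * giving pending jobs up to @timeout to drain.  Returns the remaining
 * timeout.
 */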
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

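/*
 * Finish the scheduler entities of every context that is no longer
 * referenced by anyone but the manager; contexts with extra references are
 * reported and skipped.
 */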
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

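/*
 * Destroy the context manager: finish all entities, drop the final
 * reference on every remaining context and release the IDR and lock.
 */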
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

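/*
 * Collect the scheduler of every usable ring (skipping harvested VCN and
 * JPEG instances) into the per-IP scheduler arrays that
 * amdgpu_ctx_init_entity() hands to drm_sched_entity_init().
 */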
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
		adev->gfx.num_gfx_sched++;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
		adev->gfx.num_compute_sched++;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
		adev->sdma.num_sdma_sched++;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
			&adev->vcn.inst[i].ring_dec.sched;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
				&adev->vcn.inst[i].ring_enc[j].sched;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
			&adev->jpeg.inst[i].ring_dec.sched;
	}
}