/*	$NetBSD: amdgpu_ids.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ids.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	unsigned int pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

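	/* Try the widest range [1 << (bits - 1), 1 << bits) first so
	 * smaller PASIDs stay available; on -ENOSPC retry one bit
	 * narrower.  The width is clamped to 31 bits so the IDA range
	 * stays within a positive int.
	 */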
	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       unsigned int pasid)
{
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
	unsigned count;
	int r;

	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
	if (r)
		goto fallback;

	if (count == 0) {
		amdgpu_pasid_free(pasid);
		return;
	}

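	/* Collapse everything into a single fence: reuse a lone fence
	 * directly, otherwise wrap them in a dma_fence_array that
	 * signals only once all of its fences have signaled.
	 */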
	if (count == 1) {
		fence = fences[0];
		kfree(fences);
	} else {
		uint64_t context = dma_fence_context_alloc(1);
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences, context,
					       1, false);
		if (!array) {
			kfree(fences);
			goto fallback;
		}
		fence = &array->base;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block until all the fences complete.
	 */
	dma_resv_wait_timeout_rcu(resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait on to the
 * sync object. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct amdgpu_vmid **idle)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;
	int r;

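	/* If a previous grab is still waiting for an ID, depend on its
	 * existing wait fence instead of building a new one.
	 */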
	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(sync, ring->vmid_wait, false);

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait until one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
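		/* amdgpu_sync_peek_fence() returned borrowed pointers, but
		 * dma_fence_array_create() consumes one reference per fence,
		 * so take those references here.
		 */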
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		r = amdgpu_sync_fence(sync, &array->base, false);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return r;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_sync *sync,
				     struct dma_fence *fence,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	bool needs_flush = vm->use_cpu_for_update;
	int r = 0;

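	/* If the reserved ID has already flushed an update from the same
	 * context that is at least as new, no extra flush is needed.
	 */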
	*id = vm->reserved_vmid[vmhub];
	if (updates && (*id)->flushed_updates &&
	    updates->context == (*id)->flushed_updates->context &&
	    !dma_fence_is_later(updates, (*id)->flushed_updates))
		updates = NULL;

	if ((*id)->owner != vm->direct.fence_context ||
	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
	    updates || !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* To prevent one context from being starved by another */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			r = amdgpu_sync_fence(sync, tmp, false);
			return r;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, fence, false);
	if (r)
		return r;

	if (updates) {
		dma_fence_put((*id)->flushed_updates);
		(*id)->flushed_updates = dma_fence_get(updates);
	}
	job->vm_needs_flush = needs_flush;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct dma_fence *fence,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM; walk the
	 * LRU in reverse so the most recently used IDs are tried first.
	 */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;
		struct dma_fence *flushed;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->direct.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		flushed = (*id)->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 and
		 * are broken on Navi10 and Navi14.
		 */
		if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
				    adev->asic_type == CHIP_NAVI10 ||
				    adev->asic_type == CHIP_NAVI14))
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active, fence, false);
		if (r)
			return r;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put((*id)->flushed_updates);
			(*id)->flushed_updates = dma_fence_get(updates);
		}

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
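	/* A NULL idle with r == 0 means no ID is free yet; a wait fence
	 * was added to the sync object, so this grab can be retried once
	 * an ID becomes available.
	 */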
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			struct dma_fence *updates = sync->last_vm_update;

			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active, fence, false);
			if (r)
				goto error;

			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->direct.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
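	/* The VMID number is the index of the ID in the manager's ids
	 * array; VMID 0 is reserved for the system VM.
	 */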
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Too many reserved VMIDs\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Take the least recently used VMID from the head of the LRU list */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			&id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}