/* $NetBSD: amdgpu_ids.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $ */

/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ids.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $");

#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>


#include "amdgpu.h"
#include "amdgpu_trace.h"

#include <linux/nbsd-namespace.h>

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
#ifdef __NetBSD__		/* XXX */
struct ida amdgpu_pasid_ida;
#else
static DEFINE_IDA(amdgpu_pasid_ida);
#endif

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	unsigned int pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
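
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * that needs a PASID at most 16 bits wide might do:
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 *
 * A negative return is -EINVAL, -ENOSPC or -ENOMEM as documented above;
 * the loop over decreasing widths keeps small PASID values available
 * for callers that ask for narrower widths later.
 */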

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       unsigned int pasid)
{
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
	unsigned count;
	int r;

	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
	if (r)
		goto fallback;

	if (count == 0) {
		amdgpu_pasid_free(pasid);
		return;
	}

	if (count == 1) {
		fence = fences[0];
		kfree(fences);
	} else {
		uint64_t context = dma_fence_context_alloc(1);
		struct dma_fence_array *array;

		array = dma_fence_array_create(count, fences, context,
					       1, false);
		if (!array) {
			kfree(fences);
			goto fallback;
		}
		fence = &array->base;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout_rcu(resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
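
/*
 * Usage sketch (illustrative, names hypothetical): on VM teardown the
 * PASID should only be released once the GPU is done with the VM's
 * root page-directory BO, by handing that BO's reservation object to
 * amdgpu_pasid_free_delayed():
 *
 *	struct dma_resv *resv = root_bo->tbo.base.resv;
 *
 *	amdgpu_pasid_free_delayed(resv, vm->pasid);
 *
 * If no fences are pending the PASID is freed immediately; otherwise a
 * fence callback (or, when callback memory cannot be allocated, a
 * blocking wait) defers the free until the fences signal.
 */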

/*
 * VMID manager
 *
 * VMIDs are a per VMHUB identifier for page tables handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait for to the
 * sync object. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct amdgpu_vmid **idle)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;
	int r;

	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(sync, ring->vmid_wait, false);

	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		r = amdgpu_sync_fence(sync, &array->base, false);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return r;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_sync *sync,
				     struct dma_fence *fence,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	bool needs_flush = vm->use_cpu_for_update;
	int r = 0;

	*id = vm->reserved_vmid[vmhub];
	if (updates && (*id)->flushed_updates &&
	    updates->context == (*id)->flushed_updates->context &&
	    !dma_fence_is_later(updates, (*id)->flushed_updates))
		updates = NULL;

	if ((*id)->owner != vm->direct.fence_context ||
	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
	    updates || !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* to prevent one context starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			r = amdgpu_sync_fence(sync, tmp, false);
			return r;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, fence, false);
	if (r)
		return r;

	if (updates) {
		dma_fence_put((*id)->flushed_updates);
		(*id)->flushed_updates = dma_fence_get(updates);
	}
	job->vm_needs_flush = needs_flush;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct dma_fence *fence,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;
		struct dma_fence *flushed;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->direct.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		flushed = (*id)->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 and
		 * are broken on Navi10 and Navi14.
		 */
		if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
				    adev->asic_type == CHIP_NAVI10 ||
				    adev->asic_type == CHIP_NAVI14))
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active, fence, false);
		if (r)
			return r;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put((*id)->flushed_updates);
			(*id)->flushed_updates = dma_fence_get(updates);
		}

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			struct dma_fence *updates = sync->last_vm_update;

			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active, fence, false);
			if (r)
				goto error;

			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->direct.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
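
/*
 * Usage sketch (illustrative, not the exact upstream call site): the
 * job submission path grabs a VMID just before a job is emitted on a
 * ring, roughly:
 *
 *	r = amdgpu_vmid_grab(vm, ring, &job->sync, fence, job);
 *	if (r)
 *		return r;
 *
 * On success job->vmid, job->pasid and job->vm_needs_flush are filled
 * in for the subsequent VM flush and command emission; on contention
 * the needed wait fences have been added to the sync object instead.
 */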

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limitation of reserved vmid\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}
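
/*
 * Usage sketch (illustrative; the hub index is an assumption for the
 * example): a VM that wants a dedicated VMID on one hub reserves it and
 * later gives it back:
 *
 *	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB_0);
 *	...
 *	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB_0);
 *
 * While the reservation is held, amdgpu_vmid_grab() routes this VM
 * through amdgpu_vmid_grab_reserved() instead of the shared LRU.
 */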

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub index
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset VMID to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset VMID to force flush on next use
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}
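
/*
 * Usage sketch (illustrative): the VMID manager is brought up once per
 * device when the VM manager is initialized and torn down with it,
 * paired roughly as:
 *
 *	amdgpu_vmid_mgr_init(adev);
 *	...
 *	amdgpu_vmid_mgr_fini(adev);
 *
 * After a full GPU reset, amdgpu_vmid_reset_all(adev) forces every VMID
 * to be flushed on its next use.
 */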