1 1.7 riastrad /* $NetBSD: amdgpu_cs.c,v 1.7 2021/12/19 12:02:39 riastradh Exp $ */ 2 1.1 riastrad 3 1.1 riastrad /* 4 1.1 riastrad * Copyright 2008 Jerome Glisse. 5 1.1 riastrad * All Rights Reserved. 6 1.1 riastrad * 7 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 8 1.1 riastrad * copy of this software and associated documentation files (the "Software"), 9 1.1 riastrad * to deal in the Software without restriction, including without limitation 10 1.1 riastrad * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 1.1 riastrad * and/or sell copies of the Software, and to permit persons to whom the 12 1.1 riastrad * Software is furnished to do so, subject to the following conditions: 13 1.1 riastrad * 14 1.1 riastrad * The above copyright notice and this permission notice (including the next 15 1.1 riastrad * paragraph) shall be included in all copies or substantial portions of the 16 1.1 riastrad * Software. 17 1.1 riastrad * 18 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 1.1 riastrad * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 22 1.1 riastrad * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 23 1.1 riastrad * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 1.1 riastrad * DEALINGS IN THE SOFTWARE. 25 1.1 riastrad * 26 1.1 riastrad * Authors: 27 1.1 riastrad * Jerome Glisse <glisse (at) freedesktop.org> 28 1.1 riastrad */ 29 1.5 riastrad 30 1.1 riastrad #include <sys/cdefs.h> 31 1.7 riastrad __KERNEL_RCSID(0, "$NetBSD: amdgpu_cs.c,v 1.7 2021/12/19 12:02:39 riastradh Exp $"); 32 1.5 riastrad 33 1.5 riastrad #include <linux/file.h> 34 1.5 riastrad #include <linux/pagemap.h> 35 1.5 riastrad #include <linux/sync_file.h> 36 1.1 riastrad 37 1.1 riastrad #include <drm/amdgpu_drm.h> 38 1.5 riastrad #include <drm/drm_syncobj.h> 39 1.1 riastrad #include "amdgpu.h" 40 1.1 riastrad #include "amdgpu_trace.h" 41 1.5 riastrad #include "amdgpu_gmc.h" 42 1.5 riastrad #include "amdgpu_gem.h" 43 1.5 riastrad #include "amdgpu_ras.h" 44 1.1 riastrad 45 1.5 riastrad static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, 46 1.5 riastrad struct drm_amdgpu_cs_chunk_fence *data, 47 1.5 riastrad uint32_t *offset) 48 1.1 riastrad { 49 1.5 riastrad struct drm_gem_object *gobj; 50 1.5 riastrad struct amdgpu_bo *bo; 51 1.5 riastrad unsigned long size; 52 1.5 riastrad int r; 53 1.1 riastrad 54 1.5 riastrad gobj = drm_gem_object_lookup(p->filp, data->handle); 55 1.5 riastrad if (gobj == NULL) 56 1.5 riastrad return -EINVAL; 57 1.1 riastrad 58 1.5 riastrad bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 59 1.5 riastrad p->uf_entry.priority = 0; 60 1.5 riastrad p->uf_entry.tv.bo = &bo->tbo; 61 1.5 riastrad /* One for TTM and one for the CS job */ 62 1.5 riastrad p->uf_entry.tv.num_shared = 2; 63 1.1 riastrad 64 1.5 riastrad drm_gem_object_put_unlocked(gobj); 65 1.1 riastrad 66 1.5 riastrad size = amdgpu_bo_size(bo); 67 1.5 riastrad if (size != PAGE_SIZE || (data->offset + 8) > size) { 68 1.5 riastrad r = -EINVAL; 69 1.5 riastrad goto error_unref; 70 1.1 riastrad } 71 1.1 riastrad 72 1.5 riastrad if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { 73 1.5 riastrad r = -EINVAL; 74 1.5 riastrad goto error_unref; 75 1.1 riastrad } 76 1.1 riastrad 77 1.5 
riastrad *offset = data->offset; 78 1.1 riastrad 79 1.1 riastrad return 0; 80 1.5 riastrad 81 1.5 riastrad error_unref: 82 1.5 riastrad amdgpu_bo_unref(&bo); 83 1.5 riastrad return r; 84 1.1 riastrad } 85 1.1 riastrad 86 1.5 riastrad static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, 87 1.5 riastrad struct drm_amdgpu_bo_list_in *data) 88 1.1 riastrad { 89 1.5 riastrad int r; 90 1.5 riastrad struct drm_amdgpu_bo_list_entry *info = NULL; 91 1.1 riastrad 92 1.5 riastrad r = amdgpu_bo_create_list_entry_array(data, &info); 93 1.5 riastrad if (r) 94 1.5 riastrad return r; 95 1.1 riastrad 96 1.5 riastrad r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number, 97 1.5 riastrad &p->bo_list); 98 1.5 riastrad if (r) 99 1.5 riastrad goto error_free; 100 1.1 riastrad 101 1.5 riastrad kvfree(info); 102 1.5 riastrad return 0; 103 1.1 riastrad 104 1.5 riastrad error_free: 105 1.5 riastrad if (info) 106 1.5 riastrad kvfree(info); 107 1.1 riastrad 108 1.5 riastrad return r; 109 1.1 riastrad } 110 1.1 riastrad 111 1.5 riastrad static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) 112 1.1 riastrad { 113 1.5 riastrad struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 114 1.5 riastrad struct amdgpu_vm *vm = &fpriv->vm; 115 1.1 riastrad uint64_t *chunk_array_user; 116 1.1 riastrad uint64_t *chunk_array; 117 1.5 riastrad unsigned size, num_ibs = 0; 118 1.5 riastrad uint32_t uf_offset = 0; 119 1.1 riastrad int i; 120 1.1 riastrad int ret; 121 1.1 riastrad 122 1.1 riastrad if (cs->in.num_chunks == 0) 123 1.1 riastrad return 0; 124 1.1 riastrad 125 1.1 riastrad chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL); 126 1.1 riastrad if (!chunk_array) 127 1.1 riastrad return -ENOMEM; 128 1.1 riastrad 129 1.1 riastrad p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); 130 1.1 riastrad if (!p->ctx) { 131 1.1 riastrad ret = -EINVAL; 132 1.1 riastrad goto free_chunk; 133 1.1 riastrad } 134 1.1 riastrad 135 1.5 riastrad mutex_lock(&p->ctx->lock); 136 1.5 riastrad 137 1.5 riastrad /* skip guilty context job */ 138 1.5 riastrad if (atomic_read(&p->ctx->guilty) == 1) { 139 1.5 riastrad ret = -ECANCELED; 140 1.5 riastrad goto free_chunk; 141 1.5 riastrad } 142 1.1 riastrad 143 1.1 riastrad /* get chunks */ 144 1.5 riastrad chunk_array_user = u64_to_user_ptr(cs->in.chunks); 145 1.1 riastrad if (copy_from_user(chunk_array, chunk_array_user, 146 1.1 riastrad sizeof(uint64_t)*cs->in.num_chunks)) { 147 1.1 riastrad ret = -EFAULT; 148 1.5 riastrad goto free_chunk; 149 1.1 riastrad } 150 1.1 riastrad 151 1.1 riastrad p->nchunks = cs->in.num_chunks; 152 1.1 riastrad p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), 153 1.1 riastrad GFP_KERNEL); 154 1.1 riastrad if (!p->chunks) { 155 1.1 riastrad ret = -ENOMEM; 156 1.5 riastrad goto free_chunk; 157 1.1 riastrad } 158 1.1 riastrad 159 1.1 riastrad for (i = 0; i < p->nchunks; i++) { 160 1.1 riastrad struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL; 161 1.1 riastrad struct drm_amdgpu_cs_chunk user_chunk; 162 1.1 riastrad uint32_t __user *cdata; 163 1.1 riastrad 164 1.5 riastrad chunk_ptr = u64_to_user_ptr(chunk_array[i]); 165 1.1 riastrad if (copy_from_user(&user_chunk, chunk_ptr, 166 1.1 riastrad sizeof(struct drm_amdgpu_cs_chunk))) { 167 1.1 riastrad ret = -EFAULT; 168 1.1 riastrad i--; 169 1.1 riastrad goto free_partial_kdata; 170 1.1 riastrad } 171 1.1 riastrad p->chunks[i].chunk_id = user_chunk.chunk_id; 172 1.1 riastrad p->chunks[i].length_dw = user_chunk.length_dw; 173 1.1 riastrad 
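		/*
		 * length_dw counts 32-bit dwords: the copy below allocates
		 * length_dw * sizeof(uint32_t) bytes of kernel memory and
		 * pulls the chunk payload in from user space (e.g. a
		 * 256-dword chunk is 1 KiB).
		 */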
174 1.1 riastrad size = p->chunks[i].length_dw; 175 1.5 riastrad cdata = u64_to_user_ptr(user_chunk.chunk_data); 176 1.1 riastrad 177 1.5 riastrad p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); 178 1.1 riastrad if (p->chunks[i].kdata == NULL) { 179 1.1 riastrad ret = -ENOMEM; 180 1.1 riastrad i--; 181 1.1 riastrad goto free_partial_kdata; 182 1.1 riastrad } 183 1.1 riastrad size *= sizeof(uint32_t); 184 1.1 riastrad if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 185 1.1 riastrad ret = -EFAULT; 186 1.1 riastrad goto free_partial_kdata; 187 1.1 riastrad } 188 1.1 riastrad 189 1.1 riastrad switch (p->chunks[i].chunk_id) { 190 1.1 riastrad case AMDGPU_CHUNK_ID_IB: 191 1.5 riastrad ++num_ibs; 192 1.1 riastrad break; 193 1.1 riastrad 194 1.1 riastrad case AMDGPU_CHUNK_ID_FENCE: 195 1.1 riastrad size = sizeof(struct drm_amdgpu_cs_chunk_fence); 196 1.1 riastrad if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { 197 1.1 riastrad ret = -EINVAL; 198 1.1 riastrad goto free_partial_kdata; 199 1.1 riastrad } 200 1.1 riastrad 201 1.5 riastrad ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata, 202 1.5 riastrad &uf_offset); 203 1.5 riastrad if (ret) 204 1.5 riastrad goto free_partial_kdata; 205 1.5 riastrad 206 1.5 riastrad break; 207 1.5 riastrad 208 1.5 riastrad case AMDGPU_CHUNK_ID_BO_HANDLES: 209 1.5 riastrad size = sizeof(struct drm_amdgpu_bo_list_in); 210 1.5 riastrad if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { 211 1.5 riastrad ret = -EINVAL; 212 1.5 riastrad goto free_partial_kdata; 213 1.5 riastrad } 214 1.5 riastrad 215 1.5 riastrad ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata); 216 1.1 riastrad if (ret) 217 1.1 riastrad goto free_partial_kdata; 218 1.1 riastrad 219 1.1 riastrad break; 220 1.1 riastrad 221 1.1 riastrad case AMDGPU_CHUNK_ID_DEPENDENCIES: 222 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_IN: 223 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: 224 1.5 riastrad case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: 225 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: 226 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: 227 1.1 riastrad break; 228 1.1 riastrad 229 1.1 riastrad default: 230 1.1 riastrad ret = -EINVAL; 231 1.1 riastrad goto free_partial_kdata; 232 1.1 riastrad } 233 1.1 riastrad } 234 1.1 riastrad 235 1.5 riastrad ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm); 236 1.5 riastrad if (ret) 237 1.5 riastrad goto free_all_kdata; 238 1.1 riastrad 239 1.5 riastrad if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) { 240 1.5 riastrad ret = -ECANCELED; 241 1.1 riastrad goto free_all_kdata; 242 1.1 riastrad } 243 1.1 riastrad 244 1.5 riastrad if (p->uf_entry.tv.bo) 245 1.5 riastrad p->job->uf_addr = uf_offset; 246 1.1 riastrad kfree(chunk_array); 247 1.5 riastrad 248 1.5 riastrad /* Use this opportunity to fill in task info for the vm */ 249 1.5 riastrad amdgpu_vm_set_task_info(vm); 250 1.5 riastrad 251 1.1 riastrad return 0; 252 1.1 riastrad 253 1.1 riastrad free_all_kdata: 254 1.1 riastrad i = p->nchunks - 1; 255 1.1 riastrad free_partial_kdata: 256 1.1 riastrad for (; i >= 0; i--) 257 1.5 riastrad kvfree(p->chunks[i].kdata); 258 1.1 riastrad kfree(p->chunks); 259 1.5 riastrad p->chunks = NULL; 260 1.5 riastrad p->nchunks = 0; 261 1.1 riastrad free_chunk: 262 1.1 riastrad kfree(chunk_array); 263 1.1 riastrad 264 1.1 riastrad return ret; 265 1.1 riastrad } 266 1.1 riastrad 267 1.5 riastrad /* Convert microseconds to bytes. 
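 * Since 1 MB/s is one byte per microsecond, a rate of 2^log2_max_MBps MB/s
 * turns an accumulated budget of us microseconds into us << log2_max_MBps
 * bytes.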
 */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
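 * (For example, at log2_max_MBps == 6, i.e. 64 MB/s, the full 200 ms credit
 * is worth about 12.8 MB; reporting that many moved bytes drains accum_us to
 * zero, and reporting more leaves it negative until the clock refills it.)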
394 1.5 riastrad */ 395 1.5 riastrad void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, 396 1.5 riastrad u64 num_vis_bytes) 397 1.5 riastrad { 398 1.5 riastrad spin_lock(&adev->mm_stats.lock); 399 1.5 riastrad adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); 400 1.5 riastrad adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); 401 1.5 riastrad spin_unlock(&adev->mm_stats.lock); 402 1.5 riastrad } 403 1.5 riastrad 404 1.5 riastrad static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, 405 1.5 riastrad struct amdgpu_bo *bo) 406 1.5 riastrad { 407 1.5 riastrad struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 408 1.5 riastrad struct ttm_operation_ctx ctx = { 409 1.5 riastrad .interruptible = true, 410 1.5 riastrad .no_wait_gpu = false, 411 1.5 riastrad .resv = bo->tbo.base.resv, 412 1.5 riastrad .flags = 0 413 1.5 riastrad }; 414 1.5 riastrad uint32_t domain; 415 1.5 riastrad int r; 416 1.5 riastrad 417 1.5 riastrad if (bo->pin_count) 418 1.5 riastrad return 0; 419 1.5 riastrad 420 1.5 riastrad /* Don't move this buffer if we have depleted our allowance 421 1.5 riastrad * to move it. Don't move anything if the threshold is zero. 422 1.1 riastrad */ 423 1.5 riastrad if (p->bytes_moved < p->bytes_moved_threshold) { 424 1.5 riastrad if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 425 1.5 riastrad (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { 426 1.5 riastrad /* And don't move a CPU_ACCESS_REQUIRED BO to limited 427 1.5 riastrad * visible VRAM if we've depleted our allowance to do 428 1.5 riastrad * that. 429 1.5 riastrad */ 430 1.5 riastrad if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) 431 1.5 riastrad domain = bo->preferred_domains; 432 1.5 riastrad else 433 1.5 riastrad domain = bo->allowed_domains; 434 1.5 riastrad } else { 435 1.5 riastrad domain = bo->preferred_domains; 436 1.5 riastrad } 437 1.5 riastrad } else { 438 1.5 riastrad domain = bo->allowed_domains; 439 1.5 riastrad } 440 1.5 riastrad 441 1.5 riastrad retry: 442 1.5 riastrad amdgpu_bo_placement_from_domain(bo, domain); 443 1.5 riastrad r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 444 1.5 riastrad 445 1.5 riastrad p->bytes_moved += ctx.bytes_moved; 446 1.5 riastrad if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 447 1.5 riastrad amdgpu_bo_in_cpu_visible_vram(bo)) 448 1.5 riastrad p->bytes_moved_vis += ctx.bytes_moved; 449 1.5 riastrad 450 1.5 riastrad if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { 451 1.5 riastrad domain = bo->allowed_domains; 452 1.5 riastrad goto retry; 453 1.5 riastrad } 454 1.5 riastrad 455 1.5 riastrad return r; 456 1.5 riastrad } 457 1.5 riastrad 458 1.5 riastrad static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) 459 1.5 riastrad { 460 1.5 riastrad struct amdgpu_cs_parser *p = param; 461 1.5 riastrad int r; 462 1.5 riastrad 463 1.5 riastrad r = amdgpu_cs_bo_validate(p, bo); 464 1.5 riastrad if (r) 465 1.5 riastrad return r; 466 1.5 riastrad 467 1.5 riastrad if (bo->shadow) 468 1.5 riastrad r = amdgpu_cs_bo_validate(p, bo->shadow); 469 1.1 riastrad 470 1.5 riastrad return r; 471 1.1 riastrad } 472 1.1 riastrad 473 1.5 riastrad static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, 474 1.1 riastrad struct list_head *validated) 475 1.1 riastrad { 476 1.5 riastrad struct ttm_operation_ctx ctx = { true, false }; 477 1.1 riastrad struct amdgpu_bo_list_entry *lobj; 478 1.1 riastrad int r; 479 1.1 riastrad 480 1.1 riastrad list_for_each_entry(lobj, validated, tv.head) { 481 1.5 riastrad 
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
#ifdef __NetBSD__
		struct vmspace *usermm;
#else
		struct mm_struct *usermm;
#endif

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
#ifdef __NetBSD__
		if (usermm && usermm != curproc->p_vmspace)
#else
		if (usermm && usermm != current->mm)
#endif
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}

static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages.
If pages are updated after registered 562 1.5 riastrad * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do 563 1.5 riastrad * amdgpu_ttm_backend_bind() to flush and invalidate new pages 564 1.5 riastrad */ 565 1.5 riastrad amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 566 1.5 riastrad struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 567 1.5 riastrad bool userpage_invalidated = false; 568 1.5 riastrad int i; 569 1.5 riastrad 570 1.5 riastrad e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, 571 1.5 riastrad sizeof(struct page *), 572 1.5 riastrad GFP_KERNEL | __GFP_ZERO); 573 1.5 riastrad if (!e->user_pages) { 574 1.5 riastrad DRM_ERROR("calloc failure\n"); 575 1.5 riastrad return -ENOMEM; 576 1.5 riastrad } 577 1.5 riastrad 578 1.5 riastrad r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages); 579 1.5 riastrad if (r) { 580 1.5 riastrad kvfree(e->user_pages); 581 1.5 riastrad e->user_pages = NULL; 582 1.5 riastrad return r; 583 1.5 riastrad } 584 1.5 riastrad 585 1.5 riastrad for (i = 0; i < bo->tbo.ttm->num_pages; i++) { 586 1.5 riastrad if (bo->tbo.ttm->pages[i] != e->user_pages[i]) { 587 1.5 riastrad userpage_invalidated = true; 588 1.5 riastrad break; 589 1.5 riastrad } 590 1.5 riastrad } 591 1.5 riastrad e->user_invalidated = userpage_invalidated; 592 1.5 riastrad } 593 1.5 riastrad 594 1.5 riastrad r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, 595 1.5 riastrad &duplicates); 596 1.5 riastrad if (unlikely(r != 0)) { 597 1.5 riastrad if (r != -ERESTARTSYS) 598 1.5 riastrad DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); 599 1.5 riastrad goto out; 600 1.5 riastrad } 601 1.5 riastrad 602 1.5 riastrad amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, 603 1.5 riastrad &p->bytes_moved_vis_threshold); 604 1.5 riastrad p->bytes_moved = 0; 605 1.5 riastrad p->bytes_moved_vis = 0; 606 1.5 riastrad 607 1.5 riastrad r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, 608 1.5 riastrad amdgpu_cs_validate, p); 609 1.5 riastrad if (r) { 610 1.5 riastrad DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); 611 1.5 riastrad goto error_validate; 612 1.5 riastrad } 613 1.1 riastrad 614 1.5 riastrad r = amdgpu_cs_list_validate(p, &duplicates); 615 1.5 riastrad if (r) 616 1.5 riastrad goto error_validate; 617 1.1 riastrad 618 1.5 riastrad r = amdgpu_cs_list_validate(p, &p->validated); 619 1.1 riastrad if (r) 620 1.1 riastrad goto error_validate; 621 1.1 riastrad 622 1.5 riastrad amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, 623 1.5 riastrad p->bytes_moved_vis); 624 1.5 riastrad 625 1.5 riastrad gds = p->bo_list->gds_obj; 626 1.5 riastrad gws = p->bo_list->gws_obj; 627 1.5 riastrad oa = p->bo_list->oa_obj; 628 1.5 riastrad 629 1.5 riastrad amdgpu_bo_list_for_each_entry(e, p->bo_list) { 630 1.5 riastrad struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 631 1.5 riastrad 632 1.5 riastrad /* Make sure we use the exclusive slot for shared BOs */ 633 1.5 riastrad if (bo->prime_shared_count) 634 1.5 riastrad e->tv.num_shared = 0; 635 1.5 riastrad e->bo_va = amdgpu_vm_bo_find(vm, bo); 636 1.5 riastrad } 637 1.5 riastrad 638 1.5 riastrad if (gds) { 639 1.5 riastrad p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; 640 1.5 riastrad p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; 641 1.5 riastrad } 642 1.5 riastrad if (gws) { 643 1.5 riastrad p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT; 644 1.5 riastrad p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT; 645 1.5 riastrad } 646 1.5 riastrad if (oa) { 647 1.5 riastrad 
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(bo));

		if (r)
			return r;
	}
	return 0;
}

/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: whether to back off the buffer reservations on error
 *
 * If error is set, back off the buffer reservations; otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
riastrad struct amdgpu_bo *bo; 736 1.5 riastrad int r; 737 1.1 riastrad 738 1.5 riastrad /* Only for UVD/VCE VM emulation */ 739 1.5 riastrad if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) { 740 1.5 riastrad unsigned i, j; 741 1.1 riastrad 742 1.5 riastrad for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { 743 1.5 riastrad struct drm_amdgpu_cs_chunk_ib *chunk_ib; 744 1.5 riastrad struct amdgpu_bo_va_mapping *m; 745 1.5 riastrad struct amdgpu_bo *aobj = NULL; 746 1.5 riastrad struct amdgpu_cs_chunk *chunk; 747 1.5 riastrad uint64_t offset, va_start; 748 1.5 riastrad struct amdgpu_ib *ib; 749 1.5 riastrad uint8_t *kptr; 750 1.1 riastrad 751 1.5 riastrad chunk = &p->chunks[i]; 752 1.5 riastrad ib = &p->job->ibs[j]; 753 1.5 riastrad chunk_ib = chunk->kdata; 754 1.1 riastrad 755 1.5 riastrad if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) 756 1.1 riastrad continue; 757 1.1 riastrad 758 1.5 riastrad va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK; 759 1.5 riastrad r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); 760 1.5 riastrad if (r) { 761 1.5 riastrad DRM_ERROR("IB va_start is invalid\n"); 762 1.5 riastrad return r; 763 1.5 riastrad } 764 1.1 riastrad 765 1.5 riastrad if ((va_start + chunk_ib->ib_bytes) > 766 1.5 riastrad (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { 767 1.5 riastrad DRM_ERROR("IB va_start+ib_bytes is invalid\n"); 768 1.5 riastrad return -EINVAL; 769 1.5 riastrad } 770 1.1 riastrad 771 1.5 riastrad /* the IB should be reserved at this point */ 772 1.5 riastrad r = amdgpu_bo_kmap(aobj, (void **)&kptr); 773 1.5 riastrad if (r) { 774 1.1 riastrad return r; 775 1.5 riastrad } 776 1.1 riastrad 777 1.5 riastrad offset = m->start * AMDGPU_GPU_PAGE_SIZE; 778 1.5 riastrad kptr += va_start - offset; 779 1.1 riastrad 780 1.5 riastrad if (ring->funcs->parse_cs) { 781 1.5 riastrad memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); 782 1.5 riastrad amdgpu_bo_kunmap(aobj); 783 1.1 riastrad 784 1.5 riastrad r = amdgpu_ring_parse_cs(ring, p, j); 785 1.5 riastrad if (r) 786 1.5 riastrad return r; 787 1.5 riastrad } else { 788 1.5 riastrad ib->ptr = (uint32_t *)kptr; 789 1.5 riastrad r = amdgpu_ring_patch_cs_in_place(ring, p, j); 790 1.5 riastrad amdgpu_bo_kunmap(aobj); 791 1.5 riastrad if (r) 792 1.5 riastrad return r; 793 1.5 riastrad } 794 1.1 riastrad 795 1.5 riastrad j++; 796 1.1 riastrad } 797 1.1 riastrad } 798 1.1 riastrad 799 1.5 riastrad if (!p->job->vm) 800 1.5 riastrad return amdgpu_cs_sync_rings(p); 801 1.1 riastrad 802 1.1 riastrad 803 1.5 riastrad r = amdgpu_vm_clear_freed(adev, vm, NULL); 804 1.5 riastrad if (r) 805 1.5 riastrad return r; 806 1.5 riastrad 807 1.5 riastrad r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); 808 1.5 riastrad if (r) 809 1.5 riastrad return r; 810 1.5 riastrad 811 1.5 riastrad r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update); 812 1.5 riastrad if (r) 813 1.5 riastrad return r; 814 1.5 riastrad 815 1.5 riastrad if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { 816 1.5 riastrad bo_va = fpriv->csa_va; 817 1.5 riastrad BUG_ON(!bo_va); 818 1.5 riastrad r = amdgpu_vm_bo_update(adev, bo_va, false); 819 1.5 riastrad if (r) 820 1.5 riastrad return r; 821 1.5 riastrad 822 1.5 riastrad r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); 823 1.5 riastrad if (r) 824 1.5 riastrad return r; 825 1.5 riastrad } 826 1.5 riastrad 827 1.5 riastrad amdgpu_bo_list_for_each_entry(e, p->bo_list) { 828 1.5 riastrad /* ignore duplicates */ 829 1.5 riastrad bo = ttm_to_amdgpu_bo(e->tv.bo); 830 1.5 riastrad if (!bo) 831 1.5 riastrad 
continue; 832 1.5 riastrad 833 1.5 riastrad bo_va = e->bo_va; 834 1.5 riastrad if (bo_va == NULL) 835 1.5 riastrad continue; 836 1.5 riastrad 837 1.5 riastrad r = amdgpu_vm_bo_update(adev, bo_va, false); 838 1.5 riastrad if (r) 839 1.5 riastrad return r; 840 1.1 riastrad 841 1.5 riastrad r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); 842 1.5 riastrad if (r) 843 1.5 riastrad return r; 844 1.1 riastrad } 845 1.1 riastrad 846 1.5 riastrad r = amdgpu_vm_handle_moved(adev, vm); 847 1.5 riastrad if (r) 848 1.5 riastrad return r; 849 1.5 riastrad 850 1.5 riastrad r = amdgpu_vm_update_pdes(adev, vm, false); 851 1.5 riastrad if (r) 852 1.5 riastrad return r; 853 1.5 riastrad 854 1.5 riastrad r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update); 855 1.5 riastrad if (r) 856 1.5 riastrad return r; 857 1.5 riastrad 858 1.5 riastrad p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); 859 1.5 riastrad 860 1.5 riastrad if (amdgpu_vm_debug) { 861 1.5 riastrad /* Invalidate all BOs to test for userspace bugs */ 862 1.5 riastrad amdgpu_bo_list_for_each_entry(e, p->bo_list) { 863 1.5 riastrad struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 864 1.1 riastrad 865 1.5 riastrad /* ignore duplicates */ 866 1.5 riastrad if (!bo) 867 1.5 riastrad continue; 868 1.1 riastrad 869 1.5 riastrad amdgpu_vm_bo_invalidate(adev, bo, false); 870 1.5 riastrad } 871 1.1 riastrad } 872 1.5 riastrad 873 1.5 riastrad return amdgpu_cs_sync_rings(p); 874 1.1 riastrad } 875 1.1 riastrad 876 1.1 riastrad static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, 877 1.1 riastrad struct amdgpu_cs_parser *parser) 878 1.1 riastrad { 879 1.1 riastrad struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; 880 1.1 riastrad struct amdgpu_vm *vm = &fpriv->vm; 881 1.5 riastrad int r, ce_preempt = 0, de_preempt = 0; 882 1.5 riastrad struct amdgpu_ring *ring; 883 1.1 riastrad int i, j; 884 1.1 riastrad 885 1.5 riastrad for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) { 886 1.1 riastrad struct amdgpu_cs_chunk *chunk; 887 1.1 riastrad struct amdgpu_ib *ib; 888 1.1 riastrad struct drm_amdgpu_cs_chunk_ib *chunk_ib; 889 1.5 riastrad struct drm_sched_entity *entity; 890 1.1 riastrad 891 1.1 riastrad chunk = &parser->chunks[i]; 892 1.5 riastrad ib = &parser->job->ibs[j]; 893 1.1 riastrad chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; 894 1.1 riastrad 895 1.1 riastrad if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) 896 1.1 riastrad continue; 897 1.1 riastrad 898 1.5 riastrad if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && 899 1.5 riastrad (amdgpu_mcbp || amdgpu_sriov_vf(adev))) { 900 1.5 riastrad if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) { 901 1.5 riastrad if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) 902 1.5 riastrad ce_preempt++; 903 1.5 riastrad else 904 1.5 riastrad de_preempt++; 905 1.5 riastrad } 906 1.1 riastrad 907 1.5 riastrad /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */ 908 1.5 riastrad if (ce_preempt > 1 || de_preempt > 1) 909 1.1 riastrad return -EINVAL; 910 1.5 riastrad } 911 1.1 riastrad 912 1.5 riastrad r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type, 913 1.5 riastrad chunk_ib->ip_instance, chunk_ib->ring, 914 1.5 riastrad &entity); 915 1.5 riastrad if (r) 916 1.5 riastrad return r; 917 1.1 riastrad 918 1.5 riastrad if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) 919 1.5 riastrad parser->job->preamble_status |= 920 1.5 riastrad AMDGPU_PREAMBLE_IB_PRESENT; 921 1.1 riastrad 922 1.5 riastrad if (parser->entity && parser->entity != entity) 923 1.5 riastrad 
return -EINVAL; 924 1.1 riastrad 925 1.5 riastrad /* Return if there is no run queue associated with this entity. 926 1.5 riastrad * Possibly because of disabled HW IP*/ 927 1.5 riastrad if (entity->rq == NULL) 928 1.5 riastrad return -EINVAL; 929 1.1 riastrad 930 1.5 riastrad parser->entity = entity; 931 1.1 riastrad 932 1.5 riastrad ring = to_amdgpu_ring(entity->rq->sched); 933 1.5 riastrad r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? 934 1.5 riastrad chunk_ib->ib_bytes : 0, ib); 935 1.5 riastrad if (r) { 936 1.5 riastrad DRM_ERROR("Failed to get ib !\n"); 937 1.5 riastrad return r; 938 1.1 riastrad } 939 1.1 riastrad 940 1.5 riastrad ib->gpu_addr = chunk_ib->va_start; 941 1.1 riastrad ib->length_dw = chunk_ib->ib_bytes / 4; 942 1.1 riastrad ib->flags = chunk_ib->flags; 943 1.5 riastrad 944 1.1 riastrad j++; 945 1.1 riastrad } 946 1.1 riastrad 947 1.5 riastrad /* MM engine doesn't support user fences */ 948 1.5 riastrad ring = to_amdgpu_ring(parser->entity->rq->sched); 949 1.5 riastrad if (parser->job->uf_addr && ring->funcs->no_user_fence) 950 1.5 riastrad return -EINVAL; 951 1.5 riastrad 952 1.5 riastrad return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity); 953 1.5 riastrad } 954 1.5 riastrad 955 1.5 riastrad static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, 956 1.5 riastrad struct amdgpu_cs_chunk *chunk) 957 1.5 riastrad { 958 1.5 riastrad struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 959 1.5 riastrad unsigned num_deps; 960 1.5 riastrad int i, r; 961 1.5 riastrad struct drm_amdgpu_cs_chunk_dep *deps; 962 1.5 riastrad 963 1.5 riastrad deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; 964 1.5 riastrad num_deps = chunk->length_dw * 4 / 965 1.5 riastrad sizeof(struct drm_amdgpu_cs_chunk_dep); 966 1.5 riastrad 967 1.5 riastrad for (i = 0; i < num_deps; ++i) { 968 1.5 riastrad struct amdgpu_ctx *ctx; 969 1.5 riastrad struct drm_sched_entity *entity; 970 1.5 riastrad struct dma_fence *fence; 971 1.5 riastrad 972 1.5 riastrad ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); 973 1.5 riastrad if (ctx == NULL) 974 1.5 riastrad return -EINVAL; 975 1.5 riastrad 976 1.5 riastrad r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type, 977 1.5 riastrad deps[i].ip_instance, 978 1.5 riastrad deps[i].ring, &entity); 979 1.5 riastrad if (r) { 980 1.5 riastrad amdgpu_ctx_put(ctx); 981 1.5 riastrad return r; 982 1.5 riastrad } 983 1.5 riastrad 984 1.5 riastrad fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); 985 1.5 riastrad amdgpu_ctx_put(ctx); 986 1.5 riastrad 987 1.5 riastrad if (IS_ERR(fence)) 988 1.5 riastrad return PTR_ERR(fence); 989 1.5 riastrad else if (!fence) 990 1.5 riastrad continue; 991 1.5 riastrad 992 1.5 riastrad if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { 993 1.5 riastrad struct drm_sched_fence *s_fence; 994 1.5 riastrad struct dma_fence *old = fence; 995 1.5 riastrad 996 1.5 riastrad s_fence = to_drm_sched_fence(fence); 997 1.5 riastrad fence = dma_fence_get(&s_fence->scheduled); 998 1.5 riastrad dma_fence_put(old); 999 1.5 riastrad } 1000 1.5 riastrad 1001 1.5 riastrad r = amdgpu_sync_fence(&p->job->sync, fence, true); 1002 1.5 riastrad dma_fence_put(fence); 1003 1.5 riastrad if (r) 1004 1.5 riastrad return r; 1005 1.5 riastrad } 1006 1.5 riastrad return 0; 1007 1.5 riastrad } 1008 1.5 riastrad 1009 1.5 riastrad static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, 1010 1.5 riastrad uint32_t handle, u64 point, 1011 1.5 riastrad u64 flags) 1012 1.5 riastrad { 1013 1.5 riastrad struct dma_fence *fence; 
1014 1.5 riastrad int r; 1015 1.5 riastrad 1016 1.5 riastrad r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); 1017 1.5 riastrad if (r) { 1018 1.6 riastrad DRM_ERROR("syncobj %u failed to find fence @ %"PRIu64" (%d)!\n", 1019 1.5 riastrad handle, point, r); 1020 1.5 riastrad return r; 1021 1.5 riastrad } 1022 1.5 riastrad 1023 1.5 riastrad r = amdgpu_sync_fence(&p->job->sync, fence, true); 1024 1.5 riastrad dma_fence_put(fence); 1025 1.5 riastrad 1026 1.5 riastrad return r; 1027 1.5 riastrad } 1028 1.5 riastrad 1029 1.5 riastrad static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, 1030 1.5 riastrad struct amdgpu_cs_chunk *chunk) 1031 1.5 riastrad { 1032 1.5 riastrad struct drm_amdgpu_cs_chunk_sem *deps; 1033 1.5 riastrad unsigned num_deps; 1034 1.5 riastrad int i, r; 1035 1.5 riastrad 1036 1.5 riastrad deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; 1037 1.5 riastrad num_deps = chunk->length_dw * 4 / 1038 1.5 riastrad sizeof(struct drm_amdgpu_cs_chunk_sem); 1039 1.5 riastrad for (i = 0; i < num_deps; ++i) { 1040 1.5 riastrad r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle, 1041 1.5 riastrad 0, 0); 1042 1.5 riastrad if (r) 1043 1.5 riastrad return r; 1044 1.5 riastrad } 1045 1.5 riastrad 1046 1.5 riastrad return 0; 1047 1.5 riastrad } 1048 1.5 riastrad 1049 1.5 riastrad 1050 1.5 riastrad static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p, 1051 1.5 riastrad struct amdgpu_cs_chunk *chunk) 1052 1.5 riastrad { 1053 1.5 riastrad struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; 1054 1.5 riastrad unsigned num_deps; 1055 1.5 riastrad int i, r; 1056 1.5 riastrad 1057 1.5 riastrad syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; 1058 1.5 riastrad num_deps = chunk->length_dw * 4 / 1059 1.5 riastrad sizeof(struct drm_amdgpu_cs_chunk_syncobj); 1060 1.5 riastrad for (i = 0; i < num_deps; ++i) { 1061 1.5 riastrad r = amdgpu_syncobj_lookup_and_add_to_sync(p, 1062 1.5 riastrad syncobj_deps[i].handle, 1063 1.5 riastrad syncobj_deps[i].point, 1064 1.5 riastrad syncobj_deps[i].flags); 1065 1.5 riastrad if (r) 1066 1.5 riastrad return r; 1067 1.5 riastrad } 1068 1.5 riastrad 1069 1.5 riastrad return 0; 1070 1.5 riastrad } 1071 1.5 riastrad 1072 1.5 riastrad static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, 1073 1.5 riastrad struct amdgpu_cs_chunk *chunk) 1074 1.5 riastrad { 1075 1.5 riastrad struct drm_amdgpu_cs_chunk_sem *deps; 1076 1.5 riastrad unsigned num_deps; 1077 1.5 riastrad int i; 1078 1.5 riastrad 1079 1.5 riastrad deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; 1080 1.5 riastrad num_deps = chunk->length_dw * 4 / 1081 1.5 riastrad sizeof(struct drm_amdgpu_cs_chunk_sem); 1082 1.5 riastrad 1083 1.5 riastrad if (p->post_deps) 1084 1.5 riastrad return -EINVAL; 1085 1.5 riastrad 1086 1.5 riastrad p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1087 1.5 riastrad GFP_KERNEL); 1088 1.5 riastrad p->num_post_deps = 0; 1089 1.5 riastrad 1090 1.5 riastrad if (!p->post_deps) 1091 1.5 riastrad return -ENOMEM; 1092 1.5 riastrad 1093 1.1 riastrad 1094 1.5 riastrad for (i = 0; i < num_deps; ++i) { 1095 1.5 riastrad p->post_deps[i].syncobj = 1096 1.5 riastrad drm_syncobj_find(p->filp, deps[i].handle); 1097 1.5 riastrad if (!p->post_deps[i].syncobj) 1098 1.1 riastrad return -EINVAL; 1099 1.5 riastrad p->post_deps[i].chain = NULL; 1100 1.5 riastrad p->post_deps[i].point = 0; 1101 1.5 riastrad p->num_post_deps++; 1102 1.5 riastrad } 1103 1.5 riastrad 1104 1.5 riastrad return 0; 
1105 1.5 riastrad } 1106 1.1 riastrad 1107 1.5 riastrad 1108 1.5 riastrad static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, 1109 1.5 riastrad struct amdgpu_cs_chunk *chunk) 1110 1.5 riastrad { 1111 1.5 riastrad struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; 1112 1.5 riastrad unsigned num_deps; 1113 1.5 riastrad int i; 1114 1.5 riastrad 1115 1.5 riastrad syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; 1116 1.5 riastrad num_deps = chunk->length_dw * 4 / 1117 1.5 riastrad sizeof(struct drm_amdgpu_cs_chunk_syncobj); 1118 1.5 riastrad 1119 1.5 riastrad if (p->post_deps) 1120 1.5 riastrad return -EINVAL; 1121 1.5 riastrad 1122 1.5 riastrad p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1123 1.5 riastrad GFP_KERNEL); 1124 1.5 riastrad p->num_post_deps = 0; 1125 1.5 riastrad 1126 1.5 riastrad if (!p->post_deps) 1127 1.5 riastrad return -ENOMEM; 1128 1.5 riastrad 1129 1.5 riastrad for (i = 0; i < num_deps; ++i) { 1130 1.5 riastrad struct amdgpu_cs_post_dep *dep = &p->post_deps[i]; 1131 1.5 riastrad 1132 1.5 riastrad dep->chain = NULL; 1133 1.5 riastrad if (syncobj_deps[i].point) { 1134 1.5 riastrad dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); 1135 1.5 riastrad if (!dep->chain) 1136 1.5 riastrad return -ENOMEM; 1137 1.5 riastrad } 1138 1.5 riastrad 1139 1.5 riastrad dep->syncobj = drm_syncobj_find(p->filp, 1140 1.5 riastrad syncobj_deps[i].handle); 1141 1.5 riastrad if (!dep->syncobj) { 1142 1.5 riastrad kfree(dep->chain); 1143 1.5 riastrad return -EINVAL; 1144 1.5 riastrad } 1145 1.5 riastrad dep->point = syncobj_deps[i].point; 1146 1.5 riastrad p->num_post_deps++; 1147 1.1 riastrad } 1148 1.1 riastrad 1149 1.1 riastrad return 0; 1150 1.1 riastrad } 1151 1.1 riastrad 1152 1.1 riastrad static int amdgpu_cs_dependencies(struct amdgpu_device *adev, 1153 1.1 riastrad struct amdgpu_cs_parser *p) 1154 1.1 riastrad { 1155 1.5 riastrad int i, r; 1156 1.1 riastrad 1157 1.1 riastrad for (i = 0; i < p->nchunks; ++i) { 1158 1.1 riastrad struct amdgpu_cs_chunk *chunk; 1159 1.1 riastrad 1160 1.1 riastrad chunk = &p->chunks[i]; 1161 1.1 riastrad 1162 1.5 riastrad switch (chunk->chunk_id) { 1163 1.5 riastrad case AMDGPU_CHUNK_ID_DEPENDENCIES: 1164 1.5 riastrad case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: 1165 1.5 riastrad r = amdgpu_cs_process_fence_dep(p, chunk); 1166 1.5 riastrad if (r) 1167 1.5 riastrad return r; 1168 1.5 riastrad break; 1169 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_IN: 1170 1.5 riastrad r = amdgpu_cs_process_syncobj_in_dep(p, chunk); 1171 1.5 riastrad if (r) 1172 1.5 riastrad return r; 1173 1.5 riastrad break; 1174 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: 1175 1.5 riastrad r = amdgpu_cs_process_syncobj_out_dep(p, chunk); 1176 1.5 riastrad if (r) 1177 1.5 riastrad return r; 1178 1.5 riastrad break; 1179 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: 1180 1.5 riastrad r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); 1181 1.1 riastrad if (r) 1182 1.1 riastrad return r; 1183 1.5 riastrad break; 1184 1.5 riastrad case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: 1185 1.5 riastrad r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); 1186 1.5 riastrad if (r) 1187 1.1 riastrad return r; 1188 1.5 riastrad break; 1189 1.1 riastrad } 1190 1.1 riastrad } 1191 1.1 riastrad 1192 1.1 riastrad return 0; 1193 1.1 riastrad } 1194 1.1 riastrad 1195 1.5 riastrad static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) 1196 1.1 riastrad { 1197 1.1 riastrad int i; 1198 1.5 riastrad 1199 1.5 riastrad 
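	/*
	 * Each post dependency either installs the CS fence directly in the
	 * syncobj or, for timeline signals, attaches it at the requested
	 * point using the dma_fence_chain node allocated while parsing the
	 * SYNCOBJ_TIMELINE_SIGNAL chunk.
	 */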
for (i = 0; i < p->num_post_deps; ++i) { 1200 1.5 riastrad if (p->post_deps[i].chain && p->post_deps[i].point) { 1201 1.5 riastrad drm_syncobj_add_point(p->post_deps[i].syncobj, 1202 1.5 riastrad p->post_deps[i].chain, 1203 1.5 riastrad p->fence, p->post_deps[i].point); 1204 1.5 riastrad p->post_deps[i].chain = NULL; 1205 1.5 riastrad } else { 1206 1.5 riastrad drm_syncobj_replace_fence(p->post_deps[i].syncobj, 1207 1.5 riastrad p->fence); 1208 1.5 riastrad } 1209 1.5 riastrad } 1210 1.5 riastrad } 1211 1.5 riastrad 1212 1.5 riastrad static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, 1213 1.5 riastrad union drm_amdgpu_cs *cs) 1214 1.5 riastrad { 1215 1.5 riastrad struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1216 1.5 riastrad struct drm_sched_entity *entity = p->entity; 1217 1.5 riastrad enum drm_sched_priority priority; 1218 1.5 riastrad struct amdgpu_ring *ring; 1219 1.5 riastrad struct amdgpu_bo_list_entry *e; 1220 1.5 riastrad struct amdgpu_job *job; 1221 1.5 riastrad uint64_t seq; 1222 1.5 riastrad int r; 1223 1.5 riastrad 1224 1.5 riastrad job = p->job; 1225 1.5 riastrad p->job = NULL; 1226 1.5 riastrad 1227 1.5 riastrad r = drm_sched_job_init(&job->base, entity, p->filp); 1228 1.5 riastrad if (r) 1229 1.5 riastrad goto error_unlock; 1230 1.5 riastrad 1231 1.5 riastrad /* No memory allocation is allowed while holding the notifier lock. 1232 1.5 riastrad * The lock is held until amdgpu_cs_submit is finished and fence is 1233 1.5 riastrad * added to BOs. 1234 1.5 riastrad */ 1235 1.5 riastrad mutex_lock(&p->adev->notifier_lock); 1236 1.5 riastrad 1237 1.5 riastrad /* If userptr are invalidated after amdgpu_cs_parser_bos(), return 1238 1.5 riastrad * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl. 1239 1.5 riastrad */ 1240 1.5 riastrad amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 1241 1.5 riastrad struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); 1242 1.5 riastrad 1243 1.5 riastrad r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); 1244 1.5 riastrad } 1245 1.5 riastrad if (r) { 1246 1.5 riastrad r = -EAGAIN; 1247 1.5 riastrad goto error_abort; 1248 1.5 riastrad } 1249 1.5 riastrad 1250 1.5 riastrad p->fence = dma_fence_get(&job->base.s_fence->finished); 1251 1.5 riastrad 1252 1.5 riastrad amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); 1253 1.5 riastrad amdgpu_cs_post_dependencies(p); 1254 1.5 riastrad 1255 1.5 riastrad if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && 1256 1.5 riastrad !p->ctx->preamble_presented) { 1257 1.5 riastrad job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; 1258 1.5 riastrad p->ctx->preamble_presented = true; 1259 1.5 riastrad } 1260 1.5 riastrad 1261 1.5 riastrad cs->out.handle = seq; 1262 1.5 riastrad job->uf_sequence = seq; 1263 1.5 riastrad 1264 1.5 riastrad amdgpu_job_free_resources(job); 1265 1.5 riastrad 1266 1.5 riastrad trace_amdgpu_cs_ioctl(job); 1267 1.5 riastrad amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); 1268 1.5 riastrad priority = job->base.s_priority; 1269 1.5 riastrad drm_sched_entity_push_job(&job->base, entity); 1270 1.5 riastrad 1271 1.5 riastrad ring = to_amdgpu_ring(entity->rq->sched); 1272 1.5 riastrad amdgpu_ring_priority_get(ring, priority); 1273 1.5 riastrad 1274 1.5 riastrad amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); 1275 1.5 riastrad 1276 1.5 riastrad ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); 1277 1.5 riastrad mutex_unlock(&p->adev->notifier_lock); 1278 1.5 riastrad 1279 1.1 riastrad return 0; 1280 1.5 riastrad 1281 1.5 riastrad 
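/*
 * At this point drm_sched_job_init() has succeeded, so the job must be torn
 * down with drm_sched_job_cleanup() and the notifier lock dropped before
 * falling through to the plain amdgpu_job_free() below.
 */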
error_abort: 1282 1.5 riastrad drm_sched_job_cleanup(&job->base); 1283 1.5 riastrad mutex_unlock(&p->adev->notifier_lock); 1284 1.5 riastrad 1285 1.5 riastrad error_unlock: 1286 1.5 riastrad amdgpu_job_free(job); 1287 1.5 riastrad return r; 1288 1.1 riastrad } 1289 1.1 riastrad 1290 1.1 riastrad int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 1291 1.1 riastrad { 1292 1.1 riastrad struct amdgpu_device *adev = dev->dev_private; 1293 1.1 riastrad union drm_amdgpu_cs *cs = data; 1294 1.1 riastrad struct amdgpu_cs_parser parser = {}; 1295 1.1 riastrad bool reserved_buffers = false; 1296 1.1 riastrad int i, r; 1297 1.1 riastrad 1298 1.5 riastrad if (amdgpu_ras_intr_triggered()) 1299 1.5 riastrad return -EHWPOISON; 1300 1.5 riastrad 1301 1.1 riastrad if (!adev->accel_working) 1302 1.1 riastrad return -EBUSY; 1303 1.1 riastrad 1304 1.1 riastrad parser.adev = adev; 1305 1.1 riastrad parser.filp = filp; 1306 1.1 riastrad 1307 1.1 riastrad r = amdgpu_cs_parser_init(&parser, data); 1308 1.1 riastrad if (r) { 1309 1.5 riastrad DRM_ERROR("Failed to initialize parser %d!\n", r); 1310 1.5 riastrad goto out; 1311 1.1 riastrad } 1312 1.5 riastrad 1313 1.5 riastrad r = amdgpu_cs_ib_fill(adev, &parser); 1314 1.5 riastrad if (r) 1315 1.5 riastrad goto out; 1316 1.5 riastrad 1317 1.5 riastrad r = amdgpu_cs_dependencies(adev, &parser); 1318 1.5 riastrad if (r) { 1319 1.5 riastrad DRM_ERROR("Failed in the dependencies handling %d!\n", r); 1320 1.5 riastrad goto out; 1321 1.1 riastrad } 1322 1.1 riastrad 1323 1.5 riastrad r = amdgpu_cs_parser_bos(&parser, data); 1324 1.5 riastrad if (r) { 1325 1.5 riastrad if (r == -ENOMEM) 1326 1.5 riastrad DRM_ERROR("Not enough memory for command submission!\n"); 1327 1.5 riastrad else if (r != -ERESTARTSYS && r != -EAGAIN) 1328 1.5 riastrad DRM_ERROR("Failed to process the buffer list %d!\n", r); 1329 1.5 riastrad goto out; 1330 1.1 riastrad } 1331 1.1 riastrad 1332 1.5 riastrad reserved_buffers = true; 1333 1.1 riastrad 1334 1.5 riastrad for (i = 0; i < parser.job->num_ibs; i++) 1335 1.1 riastrad trace_amdgpu_cs(&parser, i); 1336 1.1 riastrad 1337 1.5 riastrad r = amdgpu_cs_vm_handling(&parser); 1338 1.1 riastrad if (r) 1339 1.1 riastrad goto out; 1340 1.1 riastrad 1341 1.5 riastrad r = amdgpu_cs_submit(&parser, cs); 1342 1.1 riastrad 1343 1.1 riastrad out: 1344 1.1 riastrad amdgpu_cs_parser_fini(&parser, r, reserved_buffers); 1345 1.5 riastrad 1346 1.1 riastrad return r; 1347 1.1 riastrad } 1348 1.1 riastrad 1349 1.1 riastrad /** 1350 1.1 riastrad * amdgpu_cs_wait_ioctl - wait for a command submission to finish 1351 1.1 riastrad * 1352 1.1 riastrad * @dev: drm device 1353 1.1 riastrad * @data: data from userspace 1354 1.1 riastrad * @filp: file private 1355 1.1 riastrad * 1356 1.1 riastrad * Wait for the command submission identified by handle to finish. 
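 *
 * On return, wait->out.status is 0 if the submission completed within the
 * timeout and 1 if the wait timed out; wait and fence errors are returned
 * as negative error codes.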
1357 1.1 riastrad */ 1358 1.1 riastrad int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, 1359 1.1 riastrad struct drm_file *filp) 1360 1.1 riastrad { 1361 1.1 riastrad union drm_amdgpu_wait_cs *wait = data; 1362 1.1 riastrad unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); 1363 1.5 riastrad struct drm_sched_entity *entity; 1364 1.1 riastrad struct amdgpu_ctx *ctx; 1365 1.5 riastrad struct dma_fence *fence; 1366 1.1 riastrad long r; 1367 1.1 riastrad 1368 1.1 riastrad ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); 1369 1.1 riastrad if (ctx == NULL) 1370 1.1 riastrad return -EINVAL; 1371 1.1 riastrad 1372 1.5 riastrad r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance, 1373 1.5 riastrad wait->in.ring, &entity); 1374 1.5 riastrad if (r) { 1375 1.5 riastrad amdgpu_ctx_put(ctx); 1376 1.5 riastrad return r; 1377 1.5 riastrad } 1378 1.5 riastrad 1379 1.5 riastrad fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle); 1380 1.1 riastrad if (IS_ERR(fence)) 1381 1.1 riastrad r = PTR_ERR(fence); 1382 1.1 riastrad else if (fence) { 1383 1.5 riastrad r = dma_fence_wait_timeout(fence, true, timeout); 1384 1.5 riastrad if (r > 0 && fence->error) 1385 1.5 riastrad r = fence->error; 1386 1.5 riastrad dma_fence_put(fence); 1387 1.1 riastrad } else 1388 1.1 riastrad r = 1; 1389 1.1 riastrad 1390 1.1 riastrad amdgpu_ctx_put(ctx); 1391 1.1 riastrad if (r < 0) 1392 1.1 riastrad return r; 1393 1.1 riastrad 1394 1.1 riastrad memset(wait, 0, sizeof(*wait)); 1395 1.1 riastrad wait->out.status = (r == 0); 1396 1.1 riastrad 1397 1.1 riastrad return 0; 1398 1.1 riastrad } 1399 1.1 riastrad 1400 1.1 riastrad /** 1401 1.5 riastrad * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence 1402 1.5 riastrad * 1403 1.5 riastrad * @adev: amdgpu device 1404 1.5 riastrad * @filp: file private 1405 1.5 riastrad * @user: drm_amdgpu_fence copied from user space 1406 1.5 riastrad */ 1407 1.5 riastrad static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, 1408 1.5 riastrad struct drm_file *filp, 1409 1.5 riastrad struct drm_amdgpu_fence *user) 1410 1.5 riastrad { 1411 1.5 riastrad struct drm_sched_entity *entity; 1412 1.5 riastrad struct amdgpu_ctx *ctx; 1413 1.5 riastrad struct dma_fence *fence; 1414 1.5 riastrad int r; 1415 1.5 riastrad 1416 1.5 riastrad ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id); 1417 1.5 riastrad if (ctx == NULL) 1418 1.5 riastrad return ERR_PTR(-EINVAL); 1419 1.5 riastrad 1420 1.5 riastrad r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance, 1421 1.5 riastrad user->ring, &entity); 1422 1.5 riastrad if (r) { 1423 1.5 riastrad amdgpu_ctx_put(ctx); 1424 1.5 riastrad return ERR_PTR(r); 1425 1.5 riastrad } 1426 1.5 riastrad 1427 1.5 riastrad fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no); 1428 1.5 riastrad amdgpu_ctx_put(ctx); 1429 1.5 riastrad 1430 1.5 riastrad return fence; 1431 1.5 riastrad } 1432 1.5 riastrad 1433 1.5 riastrad int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, 1434 1.5 riastrad struct drm_file *filp) 1435 1.5 riastrad { 1436 1.5 riastrad struct amdgpu_device *adev = dev->dev_private; 1437 1.5 riastrad union drm_amdgpu_fence_to_handle *info = data; 1438 1.5 riastrad struct dma_fence *fence; 1439 1.5 riastrad struct drm_syncobj *syncobj; 1440 1.5 riastrad struct sync_file *sync_file; 1441 1.5 riastrad int fd, r; 1442 1.5 riastrad 1443 1.5 riastrad fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence); 1444 1.5 riastrad if (IS_ERR(fence)) 1445 1.5 riastrad 
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
#ifdef __NetBSD__
	    {
		struct file *fp = NULL;

		/* XXX errno NetBSD->Linux */
		r = -fd_allocfile(&fp, &fd);
		if (r)
			goto out;
		sync_file = sync_file_create(fence, fp);
		if (sync_file == NULL) {
			r = -ENOMEM;
			goto out;
		}
		fd_affix(curproc, fp, fd);
		fp = NULL;	/* consumed by sync_file */

out:		if (fp) {
			fd_abort(curproc, fp, fd);
			fd = -1;
		}
		dma_fence_put(fence);
		if (r)		/* don't hand back an invalid fd */
			return r;
	    }
#else
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
#endif
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}
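/*
 * Editor's note: a userspace-side sketch of the FENCE_TO_HANDLE path
 * above, exporting a submission fence as a sync_file fd.  Not
 * compiled; it assumes the same headers and drmIoctl() as the earlier
 * sketches, and export_fence_as_sync_file() is an illustrative name.
 */
#if 0	/* illustrative only -- not compiled */
/*
 * Export the fence of a previous submission as a sync_file fd that can
 * be handed to other drivers or polled.  Returns the fd or a negative
 * errno.
 */
static int export_fence_as_sync_file(int fd, uint32_t ctx_id, uint32_t ip_type,
				     uint32_t ring, uint64_t seq_no)
{
	union drm_amdgpu_fence_to_handle fth;

	memset(&fth, 0, sizeof(fth));
	fth.in.fence.ctx_id = ctx_id;
	fth.in.fence.ip_type = ip_type;
	fth.in.fence.ip_instance = 0;
	fth.in.fence.ring = ring;
	fth.in.fence.seq_no = seq_no;
	fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth) != 0)
		return -errno;

	return (int)fth.out.handle;
}
#endif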
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* Read the error before dropping our reference. */
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}
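/*
 * Editor's note: a userspace-side sketch of the WAIT_FENCES ioctl that
 * the two helpers above service, using wait-any mode.  Not compiled;
 * same assumed headers and drmIoctl() as the earlier sketches, and
 * wait_for_any() plus the -ETIME convention are illustrative choices.
 */
#if 0	/* illustrative only -- not compiled */
/*
 * Wait until any of the given submissions signals.  "fences" is an
 * array of drm_amdgpu_fence filled as in the FENCE_TO_HANDLE sketch.
 * Returns the index of the first signalled fence, -ETIME on timeout,
 * or another negative errno.
 */
static int wait_for_any(int fd, const struct drm_amdgpu_fence *fences,
			uint32_t count, uint64_t timeout_ns)
{
	union drm_amdgpu_wait_fences wf;

	memset(&wf, 0, sizeof(wf));
	wf.in.fences = (uintptr_t)fences;
	wf.in.fence_count = count;
	wf.in.wait_all = 0;		/* 1 would wait for all of them */
	wf.in.timeout_ns = timeout_ns;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &wf) != 0)
		return -errno;

	if (!wf.out.status)
		return -ETIME;	/* nothing signalled before the timeout */

	return (int)wf.out.first_signaled;
}
#endif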
/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when a mapping
 * is found, -EINVAL otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
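/*
 * Editor's note: a kernel-side sketch of how an IB parser might consume
 * amdgpu_cs_find_mapping(), loosely patterned after the UVD/VCE command
 * parsers.  Not compiled; resolve_ib_address() is an illustrative name
 * and the surrounding context and error handling are simplified.
 */
#if 0	/* illustrative only -- not compiled */
/*
 * Resolve a GPU virtual address found while parsing an IB into the
 * backing BO and the byte offset inside it.
 */
static int resolve_ib_address(struct amdgpu_cs_parser *p, uint64_t addr,
			      struct amdgpu_bo **bo_out, uint64_t *offset_out)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r)
		return r;

	/* mapping->start is in GPU pages; convert to a byte offset. */
	*offset_out = addr - mapping->start * AMDGPU_GPU_PAGE_SIZE;
	*bo_out = bo;
	return 0;
}
#endif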