1 1.9 riastrad /* $NetBSD: radeon_gem.c,v 1.9 2021/12/18 23:45:43 riastradh Exp $ */ 2 1.4 riastrad 3 1.1 riastrad /* 4 1.1 riastrad * Copyright 2008 Advanced Micro Devices, Inc. 5 1.1 riastrad * Copyright 2008 Red Hat Inc. 6 1.1 riastrad * Copyright 2009 Jerome Glisse. 7 1.1 riastrad * 8 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 9 1.1 riastrad * copy of this software and associated documentation files (the "Software"), 10 1.1 riastrad * to deal in the Software without restriction, including without limitation 11 1.1 riastrad * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 1.1 riastrad * and/or sell copies of the Software, and to permit persons to whom the 13 1.1 riastrad * Software is furnished to do so, subject to the following conditions: 14 1.1 riastrad * 15 1.1 riastrad * The above copyright notice and this permission notice shall be included in 16 1.1 riastrad * all copies or substantial portions of the Software. 17 1.1 riastrad * 18 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 1.1 riastrad * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 22 1.1 riastrad * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 23 1.1 riastrad * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 24 1.1 riastrad * OTHER DEALINGS IN THE SOFTWARE. 25 1.1 riastrad * 26 1.1 riastrad * Authors: Dave Airlie 27 1.1 riastrad * Alex Deucher 28 1.1 riastrad * Jerome Glisse 29 1.1 riastrad */ 30 1.9 riastrad 31 1.4 riastrad #include <sys/cdefs.h> 32 1.9 riastrad __KERNEL_RCSID(0, "$NetBSD: radeon_gem.c,v 1.9 2021/12/18 23:45:43 riastradh Exp $"); 33 1.9 riastrad 34 1.9 riastrad #include <linux/pci.h> 35 1.4 riastrad 36 1.9 riastrad #include <drm/drm_debugfs.h> 37 1.9 riastrad #include <drm/drm_device.h> 38 1.9 riastrad #include <drm/drm_file.h> 39 1.1 riastrad #include <drm/radeon_drm.h> 40 1.9 riastrad 41 1.1 riastrad #include "radeon.h" 42 1.1 riastrad 43 1.7 riastrad #include <linux/nbsd-namespace.h> 44 1.7 riastrad 45 1.1 riastrad void radeon_gem_object_free(struct drm_gem_object *gobj) 46 1.1 riastrad { 47 1.1 riastrad struct radeon_bo *robj = gem_to_radeon_bo(gobj); 48 1.1 riastrad 49 1.1 riastrad if (robj) { 50 1.4 riastrad radeon_mn_unregister(robj); 51 1.1 riastrad radeon_bo_unref(&robj); 52 1.1 riastrad } 53 1.1 riastrad } 54 1.1 riastrad 55 1.4 riastrad int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, 56 1.1 riastrad int alignment, int initial_domain, 57 1.4 riastrad u32 flags, bool kernel, 58 1.1 riastrad struct drm_gem_object **obj) 59 1.1 riastrad { 60 1.1 riastrad struct radeon_bo *robj; 61 1.1 riastrad unsigned long max_size; 62 1.1 riastrad int r; 63 1.1 riastrad 64 1.1 riastrad *obj = NULL; 65 1.1 riastrad /* At least align on page size */ 66 1.1 riastrad if (alignment < PAGE_SIZE) { 67 1.1 riastrad alignment = PAGE_SIZE; 68 1.1 riastrad } 69 1.1 riastrad 70 1.4 riastrad /* Maximum bo size is the unpinned gtt size since we use the gtt to 71 1.4 riastrad * handle vram to system pool migrations. 72 1.4 riastrad */ 73 1.4 riastrad max_size = rdev->mc.gtt_size - rdev->gart_pin_size; 74 1.1 riastrad if (size > max_size) { 75 1.4 riastrad DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", 76 1.4 riastrad size >> 20, max_size >> 20); 77 1.1 riastrad return -ENOMEM; 78 1.1 riastrad } 79 1.1 riastrad 80 1.1 riastrad retry: 81 1.4 riastrad r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, 82 1.4 riastrad flags, NULL, NULL, &robj); 83 1.1 riastrad if (r) { 84 1.1 riastrad if (r != -ERESTARTSYS) { 85 1.1 riastrad if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { 86 1.1 riastrad initial_domain |= RADEON_GEM_DOMAIN_GTT; 87 1.1 riastrad goto retry; 88 1.1 riastrad } 89 1.4 riastrad DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n", 90 1.1 riastrad size, initial_domain, alignment, r); 91 1.1 riastrad } 92 1.1 riastrad return r; 93 1.1 riastrad } 94 1.9 riastrad *obj = &robj->tbo.base; 95 1.2 riastrad #ifndef __NetBSD__ 96 1.1 riastrad robj->pid = task_pid_nr(current); 97 1.2 riastrad #endif 98 1.1 riastrad 99 1.1 riastrad mutex_lock(&rdev->gem.mutex); 100 1.1 riastrad list_add_tail(&robj->list, &rdev->gem.objects); 101 1.1 riastrad mutex_unlock(&rdev->gem.mutex); 102 1.1 riastrad 103 1.1 riastrad return 0; 104 1.1 riastrad } 105 1.1 riastrad 106 1.1 riastrad static int radeon_gem_set_domain(struct drm_gem_object *gobj, 107 1.1 riastrad uint32_t rdomain, uint32_t wdomain) 108 1.1 riastrad { 109 1.1 riastrad struct radeon_bo *robj; 110 1.1 riastrad uint32_t domain; 111 1.4 riastrad long r; 112 1.1 riastrad 113 1.1 riastrad /* FIXME: reeimplement */ 114 1.1 riastrad robj = gem_to_radeon_bo(gobj); 115 1.1 riastrad /* work out where to validate the buffer to */ 116 1.1 riastrad domain = wdomain; 117 1.1 riastrad if (!domain) { 118 1.1 riastrad domain = rdomain; 119 1.1 riastrad } 120 1.1 riastrad if (!domain) { 121 1.1 riastrad /* Do nothings */ 122 1.9 riastrad pr_warn("Set domain without domain !\n"); 123 1.1 riastrad return 0; 124 1.1 riastrad } 125 1.1 riastrad if (domain == RADEON_GEM_DOMAIN_CPU) { 126 1.1 riastrad /* Asking for cpu access wait for object idle */ 127 1.9 riastrad r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 128 1.4 riastrad if (!r) 129 1.4 riastrad r = -EBUSY; 130 1.4 riastrad 131 1.4 riastrad if (r < 0 && r != -EINTR) { 132 1.9 riastrad pr_err("Failed to wait for object: %li\n", r); 133 1.1 riastrad return r; 134 1.1 riastrad } 135 1.1 riastrad } 136 1.9 riastrad if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) { 137 1.9 riastrad /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */ 138 1.9 riastrad return -EINVAL; 139 1.9 riastrad } 140 1.1 riastrad return 0; 141 1.1 riastrad } 142 1.1 riastrad 143 1.1 riastrad int radeon_gem_init(struct radeon_device *rdev) 144 1.1 riastrad { 145 1.1 riastrad INIT_LIST_HEAD(&rdev->gem.objects); 146 1.1 riastrad return 0; 147 1.1 riastrad } 148 1.1 riastrad 149 1.1 riastrad void radeon_gem_fini(struct radeon_device *rdev) 150 1.1 riastrad { 151 1.1 riastrad radeon_bo_force_delete(rdev); 152 1.1 riastrad } 153 1.1 riastrad 154 1.1 riastrad /* 155 1.1 riastrad * Call from drm_gem_handle_create which appear in both new and open ioctl 156 1.1 riastrad * case. 157 1.1 riastrad */ 158 1.1 riastrad int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 159 1.1 riastrad { 160 1.1 riastrad struct radeon_bo *rbo = gem_to_radeon_bo(obj); 161 1.1 riastrad struct radeon_device *rdev = rbo->rdev; 162 1.1 riastrad struct radeon_fpriv *fpriv = file_priv->driver_priv; 163 1.1 riastrad struct radeon_vm *vm = &fpriv->vm; 164 1.1 riastrad struct radeon_bo_va *bo_va; 165 1.1 riastrad int r; 166 1.1 riastrad 167 1.4 riastrad if ((rdev->family < CHIP_CAYMAN) || 168 1.4 riastrad (!rdev->accel_working)) { 169 1.1 riastrad return 0; 170 1.1 riastrad } 171 1.1 riastrad 172 1.1 riastrad r = radeon_bo_reserve(rbo, false); 173 1.1 riastrad if (r) { 174 1.1 riastrad return r; 175 1.1 riastrad } 176 1.1 riastrad 177 1.1 riastrad bo_va = radeon_vm_bo_find(vm, rbo); 178 1.1 riastrad if (!bo_va) { 179 1.1 riastrad bo_va = radeon_vm_bo_add(rdev, vm, rbo); 180 1.1 riastrad } else { 181 1.1 riastrad ++bo_va->ref_count; 182 1.1 riastrad } 183 1.1 riastrad radeon_bo_unreserve(rbo); 184 1.1 riastrad 185 1.1 riastrad return 0; 186 1.1 riastrad } 187 1.1 riastrad 188 1.1 riastrad void radeon_gem_object_close(struct drm_gem_object *obj, 189 1.1 riastrad struct drm_file *file_priv) 190 1.1 riastrad { 191 1.1 riastrad struct radeon_bo *rbo = gem_to_radeon_bo(obj); 192 1.1 riastrad struct radeon_device *rdev = rbo->rdev; 193 1.1 riastrad struct radeon_fpriv *fpriv = file_priv->driver_priv; 194 1.1 riastrad struct radeon_vm *vm = &fpriv->vm; 195 1.1 riastrad struct radeon_bo_va *bo_va; 196 1.1 riastrad int r; 197 1.1 riastrad 198 1.4 riastrad if ((rdev->family < CHIP_CAYMAN) || 199 1.4 riastrad (!rdev->accel_working)) { 200 1.1 riastrad return; 201 1.1 riastrad } 202 1.1 riastrad 203 1.1 riastrad r = radeon_bo_reserve(rbo, true); 204 1.1 riastrad if (r) { 205 1.1 riastrad dev_err(rdev->dev, "leaking bo va because " 206 1.1 riastrad "we fail to reserve bo (%d)\n", r); 207 1.1 riastrad return; 208 1.1 riastrad } 209 1.1 riastrad bo_va = radeon_vm_bo_find(vm, rbo); 210 1.1 riastrad if (bo_va) { 211 1.1 riastrad if (--bo_va->ref_count == 0) { 212 1.1 riastrad radeon_vm_bo_rmv(rdev, bo_va); 213 1.1 riastrad } 214 1.1 riastrad } 215 1.1 riastrad radeon_bo_unreserve(rbo); 216 1.1 riastrad } 217 1.1 riastrad 218 1.1 riastrad static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) 219 1.1 riastrad { 220 1.1 riastrad if (r == -EDEADLK) { 221 1.1 riastrad r = radeon_gpu_reset(rdev); 222 1.1 riastrad if (!r) 223 1.1 riastrad r = -EAGAIN; 224 1.1 riastrad } 225 1.1 riastrad return r; 226 1.1 riastrad } 227 1.1 riastrad 228 1.1 riastrad /* 229 1.1 riastrad * GEM ioctls. 230 1.1 riastrad */ 231 1.1 riastrad int radeon_gem_info_ioctl(struct drm_device *dev, void *data, 232 1.1 riastrad struct drm_file *filp) 233 1.1 riastrad { 234 1.1 riastrad struct radeon_device *rdev = dev->dev_private; 235 1.1 riastrad struct drm_radeon_gem_info *args = data; 236 1.1 riastrad struct ttm_mem_type_manager *man; 237 1.1 riastrad 238 1.1 riastrad man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 239 1.1 riastrad 240 1.9 riastrad args->vram_size = (u64)man->size << PAGE_SHIFT; 241 1.9 riastrad args->vram_visible = rdev->mc.visible_vram_size; 242 1.4 riastrad args->vram_visible -= rdev->vram_pin_size; 243 1.4 riastrad args->gart_size = rdev->mc.gtt_size; 244 1.4 riastrad args->gart_size -= rdev->gart_pin_size; 245 1.4 riastrad 246 1.1 riastrad return 0; 247 1.1 riastrad } 248 1.1 riastrad 249 1.1 riastrad int radeon_gem_pread_ioctl(struct drm_device *dev, void *data, 250 1.1 riastrad struct drm_file *filp) 251 1.1 riastrad { 252 1.1 riastrad /* TODO: implement */ 253 1.1 riastrad DRM_ERROR("unimplemented %s\n", __func__); 254 1.1 riastrad return -ENOSYS; 255 1.1 riastrad } 256 1.1 riastrad 257 1.1 riastrad int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data, 258 1.1 riastrad struct drm_file *filp) 259 1.1 riastrad { 260 1.1 riastrad /* TODO: implement */ 261 1.1 riastrad DRM_ERROR("unimplemented %s\n", __func__); 262 1.1 riastrad return -ENOSYS; 263 1.1 riastrad } 264 1.1 riastrad 265 1.1 riastrad int radeon_gem_create_ioctl(struct drm_device *dev, void *data, 266 1.1 riastrad struct drm_file *filp) 267 1.1 riastrad { 268 1.1 riastrad struct radeon_device *rdev = dev->dev_private; 269 1.1 riastrad struct drm_radeon_gem_create *args = data; 270 1.1 riastrad struct drm_gem_object *gobj; 271 1.1 riastrad uint32_t handle; 272 1.1 riastrad int r; 273 1.1 riastrad 274 1.1 riastrad down_read(&rdev->exclusive_lock); 275 1.1 riastrad /* create a gem object to contain this object in */ 276 1.1 riastrad args->size = roundup(args->size, PAGE_SIZE); 277 1.1 riastrad r = radeon_gem_object_create(rdev, args->size, args->alignment, 278 1.4 riastrad args->initial_domain, args->flags, 279 1.4 riastrad false, &gobj); 280 1.1 riastrad if (r) { 281 1.1 riastrad up_read(&rdev->exclusive_lock); 282 1.1 riastrad r = radeon_gem_handle_lockup(rdev, r); 283 1.1 riastrad return r; 284 1.1 riastrad } 285 1.1 riastrad r = drm_gem_handle_create(filp, gobj, &handle); 286 1.1 riastrad /* drop reference from allocate - handle holds it now */ 287 1.9 riastrad drm_gem_object_put_unlocked(gobj); 288 1.1 riastrad if (r) { 289 1.1 riastrad up_read(&rdev->exclusive_lock); 290 1.1 riastrad r = radeon_gem_handle_lockup(rdev, r); 291 1.1 riastrad return r; 292 1.1 riastrad } 293 1.1 riastrad args->handle = handle; 294 1.1 riastrad up_read(&rdev->exclusive_lock); 295 1.1 riastrad return 0; 296 1.1 riastrad } 297 1.1 riastrad 298 1.4 riastrad int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, 299 1.4 riastrad struct drm_file *filp) 300 1.4 riastrad { 301 1.9 riastrad struct ttm_operation_ctx ctx = { true, false }; 302 1.4 riastrad struct radeon_device *rdev = dev->dev_private; 303 1.4 riastrad struct drm_radeon_gem_userptr *args = data; 304 1.4 riastrad struct drm_gem_object *gobj; 305 1.4 riastrad struct radeon_bo *bo; 306 1.4 riastrad uint32_t handle; 307 1.4 riastrad int r; 308 1.4 riastrad 309 1.9 riastrad args->addr = untagged_addr(args->addr); 310 1.9 riastrad 311 1.4 riastrad if (offset_in_page(args->addr | args->size)) 312 1.4 riastrad return -EINVAL; 313 1.4 riastrad 314 1.4 riastrad /* reject unknown flag values */ 315 1.4 riastrad if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | 316 1.4 riastrad RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE | 317 1.4 riastrad RADEON_GEM_USERPTR_REGISTER)) 318 1.4 riastrad return -EINVAL; 319 1.4 riastrad 320 1.4 riastrad if (args->flags & RADEON_GEM_USERPTR_READONLY) { 321 1.4 riastrad /* readonly pages not tested on older hardware */ 322 1.4 riastrad if (rdev->family < CHIP_R600) 323 1.4 riastrad return -EINVAL; 324 1.4 riastrad 325 1.4 riastrad } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) || 326 1.4 riastrad !(args->flags & RADEON_GEM_USERPTR_REGISTER)) { 327 1.4 riastrad 328 1.4 riastrad /* if we want to write to it we must require anonymous 329 1.4 riastrad memory and install a MMU notifier */ 330 1.4 riastrad return -EACCES; 331 1.4 riastrad } 332 1.4 riastrad 333 1.4 riastrad down_read(&rdev->exclusive_lock); 334 1.4 riastrad 335 1.4 riastrad /* create a gem object to contain this object in */ 336 1.4 riastrad r = radeon_gem_object_create(rdev, args->size, 0, 337 1.4 riastrad RADEON_GEM_DOMAIN_CPU, 0, 338 1.4 riastrad false, &gobj); 339 1.4 riastrad if (r) 340 1.4 riastrad goto handle_lockup; 341 1.4 riastrad 342 1.4 riastrad bo = gem_to_radeon_bo(gobj); 343 1.4 riastrad r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); 344 1.4 riastrad if (r) 345 1.4 riastrad goto release_object; 346 1.4 riastrad 347 1.4 riastrad if (args->flags & RADEON_GEM_USERPTR_REGISTER) { 348 1.4 riastrad r = radeon_mn_register(bo, args->addr); 349 1.4 riastrad if (r) 350 1.4 riastrad goto release_object; 351 1.4 riastrad } 352 1.4 riastrad 353 1.4 riastrad if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { 354 1.8 riastrad #ifdef __NetBSD__ 355 1.8 riastrad vm_map_lock_read(&curproc->p_vmspace->vm_map); 356 1.8 riastrad #else 357 1.4 riastrad down_read(¤t->mm->mmap_sem); 358 1.8 riastrad #endif 359 1.4 riastrad r = radeon_bo_reserve(bo, true); 360 1.4 riastrad if (r) { 361 1.8 riastrad #ifdef __NetBSD__ 362 1.8 riastrad vm_map_unlock_read(&curproc->p_vmspace->vm_map); 363 1.8 riastrad #else 364 1.4 riastrad up_read(¤t->mm->mmap_sem); 365 1.8 riastrad #endif 366 1.4 riastrad goto release_object; 367 1.4 riastrad } 368 1.4 riastrad 369 1.4 riastrad radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); 370 1.9 riastrad r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 371 1.4 riastrad radeon_bo_unreserve(bo); 372 1.8 riastrad #ifdef __NetBSD__ 373 1.8 riastrad vm_map_unlock_read(&curproc->p_vmspace->vm_map); 374 1.8 riastrad #else 375 1.4 riastrad up_read(¤t->mm->mmap_sem); 376 1.8 riastrad #endif 377 1.4 riastrad if (r) 378 1.4 riastrad goto release_object; 379 1.4 riastrad } 380 1.4 riastrad 381 1.4 riastrad r = drm_gem_handle_create(filp, gobj, &handle); 382 1.4 riastrad /* drop reference from allocate - handle holds it now */ 383 1.9 riastrad drm_gem_object_put_unlocked(gobj); 384 1.4 riastrad if (r) 385 1.4 riastrad goto handle_lockup; 386 1.4 riastrad 387 1.4 riastrad args->handle = handle; 388 1.4 riastrad up_read(&rdev->exclusive_lock); 389 1.4 riastrad return 0; 390 1.4 riastrad 391 1.4 riastrad release_object: 392 1.9 riastrad drm_gem_object_put_unlocked(gobj); 393 1.4 riastrad 394 1.4 riastrad handle_lockup: 395 1.4 riastrad up_read(&rdev->exclusive_lock); 396 1.4 riastrad r = radeon_gem_handle_lockup(rdev, r); 397 1.4 riastrad 398 1.4 riastrad return r; 399 1.4 riastrad } 400 1.4 riastrad 401 1.1 riastrad int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, 402 1.1 riastrad struct drm_file *filp) 403 1.1 riastrad { 404 1.1 riastrad /* transition the BO to a domain - 405 1.1 riastrad * just validate the BO into a certain domain */ 406 1.1 riastrad struct radeon_device *rdev = dev->dev_private; 407 1.1 riastrad struct drm_radeon_gem_set_domain *args = data; 408 1.1 riastrad struct drm_gem_object *gobj; 409 1.1 riastrad struct radeon_bo *robj; 410 1.1 riastrad int r; 411 1.1 riastrad 412 1.1 riastrad /* for now if someone requests domain CPU - 413 1.1 riastrad * just make sure the buffer is finished with */ 414 1.1 riastrad down_read(&rdev->exclusive_lock); 415 1.1 riastrad 416 1.1 riastrad /* just do a BO wait for now */ 417 1.9 riastrad gobj = drm_gem_object_lookup(filp, args->handle); 418 1.1 riastrad if (gobj == NULL) { 419 1.1 riastrad up_read(&rdev->exclusive_lock); 420 1.1 riastrad return -ENOENT; 421 1.1 riastrad } 422 1.1 riastrad robj = gem_to_radeon_bo(gobj); 423 1.1 riastrad 424 1.1 riastrad r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 425 1.1 riastrad 426 1.9 riastrad drm_gem_object_put_unlocked(gobj); 427 1.1 riastrad up_read(&rdev->exclusive_lock); 428 1.1 riastrad r = radeon_gem_handle_lockup(robj->rdev, r); 429 1.1 riastrad return r; 430 1.1 riastrad } 431 1.1 riastrad 432 1.1 riastrad int radeon_mode_dumb_mmap(struct drm_file *filp, 433 1.1 riastrad struct drm_device *dev, 434 1.1 riastrad uint32_t handle, uint64_t *offset_p) 435 1.1 riastrad { 436 1.1 riastrad struct drm_gem_object *gobj; 437 1.1 riastrad struct radeon_bo *robj; 438 1.1 riastrad 439 1.9 riastrad gobj = drm_gem_object_lookup(filp, handle); 440 1.1 riastrad if (gobj == NULL) { 441 1.1 riastrad return -ENOENT; 442 1.1 riastrad } 443 1.1 riastrad robj = gem_to_radeon_bo(gobj); 444 1.4 riastrad if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { 445 1.9 riastrad drm_gem_object_put_unlocked(gobj); 446 1.4 riastrad return -EPERM; 447 1.4 riastrad } 448 1.1 riastrad *offset_p = radeon_bo_mmap_offset(robj); 449 1.9 riastrad drm_gem_object_put_unlocked(gobj); 450 1.1 riastrad return 0; 451 1.1 riastrad } 452 1.1 riastrad 453 1.1 riastrad int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, 454 1.1 riastrad struct drm_file *filp) 455 1.1 riastrad { 456 1.1 riastrad struct drm_radeon_gem_mmap *args = data; 457 1.1 riastrad 458 1.1 riastrad return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); 459 1.1 riastrad } 460 1.1 riastrad 461 1.1 riastrad int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 462 1.1 riastrad struct drm_file *filp) 463 1.1 riastrad { 464 1.1 riastrad struct drm_radeon_gem_busy *args = data; 465 1.1 riastrad struct drm_gem_object *gobj; 466 1.1 riastrad struct radeon_bo *robj; 467 1.1 riastrad int r; 468 1.1 riastrad uint32_t cur_placement = 0; 469 1.1 riastrad 470 1.9 riastrad gobj = drm_gem_object_lookup(filp, args->handle); 471 1.1 riastrad if (gobj == NULL) { 472 1.1 riastrad return -ENOENT; 473 1.1 riastrad } 474 1.1 riastrad robj = gem_to_radeon_bo(gobj); 475 1.4 riastrad 476 1.9 riastrad r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true); 477 1.4 riastrad if (r == 0) 478 1.4 riastrad r = -EBUSY; 479 1.4 riastrad else 480 1.4 riastrad r = 0; 481 1.4 riastrad 482 1.9 riastrad cur_placement = READ_ONCE(robj->tbo.mem.mem_type); 483 1.1 riastrad args->domain = radeon_mem_type_to_domain(cur_placement); 484 1.9 riastrad drm_gem_object_put_unlocked(gobj); 485 1.1 riastrad return r; 486 1.1 riastrad } 487 1.1 riastrad 488 1.1 riastrad int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 489 1.1 riastrad struct drm_file *filp) 490 1.1 riastrad { 491 1.1 riastrad struct radeon_device *rdev = dev->dev_private; 492 1.1 riastrad struct drm_radeon_gem_wait_idle *args = data; 493 1.1 riastrad struct drm_gem_object *gobj; 494 1.1 riastrad struct radeon_bo *robj; 495 1.4 riastrad int r = 0; 496 1.4 riastrad uint32_t cur_placement = 0; 497 1.4 riastrad long ret; 498 1.1 riastrad 499 1.9 riastrad gobj = drm_gem_object_lookup(filp, args->handle); 500 1.1 riastrad if (gobj == NULL) { 501 1.1 riastrad return -ENOENT; 502 1.1 riastrad } 503 1.1 riastrad robj = gem_to_radeon_bo(gobj); 504 1.4 riastrad 505 1.9 riastrad ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ); 506 1.4 riastrad if (ret == 0) 507 1.4 riastrad r = -EBUSY; 508 1.4 riastrad else if (ret < 0) 509 1.4 riastrad r = ret; 510 1.4 riastrad 511 1.4 riastrad /* Flush HDP cache via MMIO if necessary */ 512 1.9 riastrad cur_placement = READ_ONCE(robj->tbo.mem.mem_type); 513 1.4 riastrad if (rdev->asic->mmio_hdp_flush && 514 1.4 riastrad radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 515 1.4 riastrad robj->rdev->asic->mmio_hdp_flush(rdev); 516 1.9 riastrad drm_gem_object_put_unlocked(gobj); 517 1.1 riastrad r = radeon_gem_handle_lockup(rdev, r); 518 1.1 riastrad return r; 519 1.1 riastrad } 520 1.1 riastrad 521 1.1 riastrad int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, 522 1.1 riastrad struct drm_file *filp) 523 1.1 riastrad { 524 1.1 riastrad struct drm_radeon_gem_set_tiling *args = data; 525 1.1 riastrad struct drm_gem_object *gobj; 526 1.1 riastrad struct radeon_bo *robj; 527 1.1 riastrad int r = 0; 528 1.1 riastrad 529 1.1 riastrad DRM_DEBUG("%d \n", args->handle); 530 1.9 riastrad gobj = drm_gem_object_lookup(filp, args->handle); 531 1.1 riastrad if (gobj == NULL) 532 1.1 riastrad return -ENOENT; 533 1.1 riastrad robj = gem_to_radeon_bo(gobj); 534 1.1 riastrad r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); 535 1.9 riastrad drm_gem_object_put_unlocked(gobj); 536 1.1 riastrad return r; 537 1.1 riastrad } 538 1.1 riastrad 539 1.1 riastrad int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 540 1.1 riastrad struct drm_file *filp) 541 1.1 riastrad { 542 1.1 riastrad struct drm_radeon_gem_get_tiling *args = data; 543 1.1 riastrad struct drm_gem_object *gobj; 544 1.1 riastrad struct radeon_bo *rbo; 545 1.1 riastrad int r = 0; 546 1.1 riastrad 547 1.1 riastrad DRM_DEBUG("\n"); 548 1.9 riastrad gobj = drm_gem_object_lookup(filp, args->handle); 549 1.1 riastrad if (gobj == NULL) 550 1.1 riastrad return -ENOENT; 551 1.1 riastrad rbo = gem_to_radeon_bo(gobj); 552 1.1 riastrad r = radeon_bo_reserve(rbo, false); 553 1.1 riastrad if (unlikely(r != 0)) 554 1.1 riastrad goto out; 555 1.1 riastrad radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 556 1.1 riastrad radeon_bo_unreserve(rbo); 557 1.1 riastrad out: 558 1.9 riastrad drm_gem_object_put_unlocked(gobj); 559 1.1 riastrad return r; 560 1.1 riastrad } 561 1.1 riastrad 562 1.4 riastrad /** 563 1.4 riastrad * radeon_gem_va_update_vm -update the bo_va in its VM 564 1.4 riastrad * 565 1.4 riastrad * @rdev: radeon_device pointer 566 1.4 riastrad * @bo_va: bo_va to update 567 1.4 riastrad * 568 1.4 riastrad * Update the bo_va directly after setting it's address. Errors are not 569 1.4 riastrad * vital here, so they are not reported back to userspace. 570 1.4 riastrad */ 571 1.4 riastrad static void radeon_gem_va_update_vm(struct radeon_device *rdev, 572 1.4 riastrad struct radeon_bo_va *bo_va) 573 1.4 riastrad { 574 1.4 riastrad struct ttm_validate_buffer tv, *entry; 575 1.4 riastrad struct radeon_bo_list *vm_bos; 576 1.4 riastrad struct ww_acquire_ctx ticket; 577 1.4 riastrad struct list_head list; 578 1.4 riastrad unsigned domain; 579 1.4 riastrad int r; 580 1.4 riastrad 581 1.4 riastrad INIT_LIST_HEAD(&list); 582 1.4 riastrad 583 1.4 riastrad tv.bo = &bo_va->bo->tbo; 584 1.9 riastrad tv.num_shared = 1; 585 1.4 riastrad list_add(&tv.head, &list); 586 1.4 riastrad 587 1.4 riastrad vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list); 588 1.4 riastrad if (!vm_bos) 589 1.4 riastrad return; 590 1.4 riastrad 591 1.4 riastrad r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); 592 1.4 riastrad if (r) 593 1.4 riastrad goto error_free; 594 1.4 riastrad 595 1.4 riastrad list_for_each_entry(entry, &list, head) { 596 1.4 riastrad domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type); 597 1.4 riastrad /* if anything is swapped out don't swap it in here, 598 1.4 riastrad just abort and wait for the next CS */ 599 1.4 riastrad if (domain == RADEON_GEM_DOMAIN_CPU) 600 1.4 riastrad goto error_unreserve; 601 1.4 riastrad } 602 1.4 riastrad 603 1.4 riastrad mutex_lock(&bo_va->vm->mutex); 604 1.4 riastrad r = radeon_vm_clear_freed(rdev, bo_va->vm); 605 1.4 riastrad if (r) 606 1.4 riastrad goto error_unlock; 607 1.4 riastrad 608 1.4 riastrad if (bo_va->it.start) 609 1.4 riastrad r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem); 610 1.4 riastrad 611 1.4 riastrad error_unlock: 612 1.4 riastrad mutex_unlock(&bo_va->vm->mutex); 613 1.4 riastrad 614 1.4 riastrad error_unreserve: 615 1.4 riastrad ttm_eu_backoff_reservation(&ticket, &list); 616 1.4 riastrad 617 1.4 riastrad error_free: 618 1.9 riastrad kvfree(vm_bos); 619 1.4 riastrad 620 1.4 riastrad if (r && r != -ERESTARTSYS) 621 1.4 riastrad DRM_ERROR("Couldn't update BO_VA (%d)\n", r); 622 1.4 riastrad } 623 1.4 riastrad 624 1.1 riastrad int radeon_gem_va_ioctl(struct drm_device *dev, void *data, 625 1.1 riastrad struct drm_file *filp) 626 1.1 riastrad { 627 1.1 riastrad struct drm_radeon_gem_va *args = data; 628 1.1 riastrad struct drm_gem_object *gobj; 629 1.1 riastrad struct radeon_device *rdev = dev->dev_private; 630 1.1 riastrad struct radeon_fpriv *fpriv = filp->driver_priv; 631 1.1 riastrad struct radeon_bo *rbo; 632 1.1 riastrad struct radeon_bo_va *bo_va; 633 1.1 riastrad u32 invalid_flags; 634 1.1 riastrad int r = 0; 635 1.1 riastrad 636 1.1 riastrad if (!rdev->vm_manager.enabled) { 637 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 638 1.1 riastrad return -ENOTTY; 639 1.1 riastrad } 640 1.1 riastrad 641 1.1 riastrad /* !! DONT REMOVE !! 642 1.1 riastrad * We don't support vm_id yet, to be sure we don't have have broken 643 1.1 riastrad * userspace, reject anyone trying to use non 0 value thus moving 644 1.1 riastrad * forward we can use those fields without breaking existant userspace 645 1.1 riastrad */ 646 1.1 riastrad if (args->vm_id) { 647 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 648 1.1 riastrad return -EINVAL; 649 1.1 riastrad } 650 1.1 riastrad 651 1.1 riastrad if (args->offset < RADEON_VA_RESERVED_SIZE) { 652 1.3 riastrad dev_err(dev->dev, 653 1.1 riastrad "offset 0x%lX is in reserved area 0x%X\n", 654 1.1 riastrad (unsigned long)args->offset, 655 1.1 riastrad RADEON_VA_RESERVED_SIZE); 656 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 657 1.1 riastrad return -EINVAL; 658 1.1 riastrad } 659 1.1 riastrad 660 1.1 riastrad /* don't remove, we need to enforce userspace to set the snooped flag 661 1.1 riastrad * otherwise we will endup with broken userspace and we won't be able 662 1.1 riastrad * to enable this feature without adding new interface 663 1.1 riastrad */ 664 1.1 riastrad invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM; 665 1.1 riastrad if ((args->flags & invalid_flags)) { 666 1.3 riastrad dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n", 667 1.1 riastrad args->flags, invalid_flags); 668 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 669 1.1 riastrad return -EINVAL; 670 1.1 riastrad } 671 1.1 riastrad 672 1.1 riastrad switch (args->operation) { 673 1.1 riastrad case RADEON_VA_MAP: 674 1.1 riastrad case RADEON_VA_UNMAP: 675 1.1 riastrad break; 676 1.1 riastrad default: 677 1.3 riastrad dev_err(dev->dev, "unsupported operation %d\n", 678 1.1 riastrad args->operation); 679 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 680 1.1 riastrad return -EINVAL; 681 1.1 riastrad } 682 1.1 riastrad 683 1.9 riastrad gobj = drm_gem_object_lookup(filp, args->handle); 684 1.1 riastrad if (gobj == NULL) { 685 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 686 1.1 riastrad return -ENOENT; 687 1.1 riastrad } 688 1.1 riastrad rbo = gem_to_radeon_bo(gobj); 689 1.1 riastrad r = radeon_bo_reserve(rbo, false); 690 1.1 riastrad if (r) { 691 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 692 1.9 riastrad drm_gem_object_put_unlocked(gobj); 693 1.1 riastrad return r; 694 1.1 riastrad } 695 1.1 riastrad bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); 696 1.1 riastrad if (!bo_va) { 697 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 698 1.9 riastrad radeon_bo_unreserve(rbo); 699 1.9 riastrad drm_gem_object_put_unlocked(gobj); 700 1.1 riastrad return -ENOENT; 701 1.1 riastrad } 702 1.1 riastrad 703 1.1 riastrad switch (args->operation) { 704 1.1 riastrad case RADEON_VA_MAP: 705 1.4 riastrad if (bo_va->it.start) { 706 1.1 riastrad args->operation = RADEON_VA_RESULT_VA_EXIST; 707 1.4 riastrad args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE; 708 1.4 riastrad radeon_bo_unreserve(rbo); 709 1.1 riastrad goto out; 710 1.1 riastrad } 711 1.1 riastrad r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); 712 1.1 riastrad break; 713 1.1 riastrad case RADEON_VA_UNMAP: 714 1.1 riastrad r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0); 715 1.1 riastrad break; 716 1.1 riastrad default: 717 1.1 riastrad break; 718 1.1 riastrad } 719 1.4 riastrad if (!r) 720 1.4 riastrad radeon_gem_va_update_vm(rdev, bo_va); 721 1.1 riastrad args->operation = RADEON_VA_RESULT_OK; 722 1.1 riastrad if (r) { 723 1.1 riastrad args->operation = RADEON_VA_RESULT_ERROR; 724 1.1 riastrad } 725 1.1 riastrad out: 726 1.9 riastrad drm_gem_object_put_unlocked(gobj); 727 1.1 riastrad return r; 728 1.1 riastrad } 729 1.1 riastrad 730 1.1 riastrad int radeon_gem_op_ioctl(struct drm_device *dev, void *data, 731 1.1 riastrad struct drm_file *filp) 732 1.1 riastrad { 733 1.1 riastrad struct drm_radeon_gem_op *args = data; 734 1.1 riastrad struct drm_gem_object *gobj; 735 1.1 riastrad struct radeon_bo *robj; 736 1.1 riastrad int r; 737 1.1 riastrad 738 1.9 riastrad gobj = drm_gem_object_lookup(filp, args->handle); 739 1.1 riastrad if (gobj == NULL) { 740 1.1 riastrad return -ENOENT; 741 1.1 riastrad } 742 1.1 riastrad robj = gem_to_radeon_bo(gobj); 743 1.4 riastrad 744 1.4 riastrad r = -EPERM; 745 1.4 riastrad if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) 746 1.4 riastrad goto out; 747 1.4 riastrad 748 1.1 riastrad r = radeon_bo_reserve(robj, false); 749 1.1 riastrad if (unlikely(r)) 750 1.1 riastrad goto out; 751 1.1 riastrad 752 1.1 riastrad switch (args->op) { 753 1.1 riastrad case RADEON_GEM_OP_GET_INITIAL_DOMAIN: 754 1.1 riastrad args->value = robj->initial_domain; 755 1.1 riastrad break; 756 1.1 riastrad case RADEON_GEM_OP_SET_INITIAL_DOMAIN: 757 1.1 riastrad robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM | 758 1.1 riastrad RADEON_GEM_DOMAIN_GTT | 759 1.1 riastrad RADEON_GEM_DOMAIN_CPU); 760 1.1 riastrad break; 761 1.1 riastrad default: 762 1.1 riastrad r = -EINVAL; 763 1.1 riastrad } 764 1.1 riastrad 765 1.1 riastrad radeon_bo_unreserve(robj); 766 1.1 riastrad out: 767 1.9 riastrad drm_gem_object_put_unlocked(gobj); 768 1.1 riastrad return r; 769 1.1 riastrad } 770 1.1 riastrad 771 1.1 riastrad int radeon_mode_dumb_create(struct drm_file *file_priv, 772 1.1 riastrad struct drm_device *dev, 773 1.1 riastrad struct drm_mode_create_dumb *args) 774 1.1 riastrad { 775 1.1 riastrad struct radeon_device *rdev = dev->dev_private; 776 1.1 riastrad struct drm_gem_object *gobj; 777 1.1 riastrad uint32_t handle; 778 1.1 riastrad int r; 779 1.1 riastrad 780 1.9 riastrad args->pitch = radeon_align_pitch(rdev, args->width, 781 1.9 riastrad DIV_ROUND_UP(args->bpp, 8), 0); 782 1.1 riastrad args->size = args->pitch * args->height; 783 1.1 riastrad args->size = ALIGN(args->size, PAGE_SIZE); 784 1.1 riastrad 785 1.1 riastrad r = radeon_gem_object_create(rdev, args->size, 0, 786 1.4 riastrad RADEON_GEM_DOMAIN_VRAM, 0, 787 1.4 riastrad false, &gobj); 788 1.1 riastrad if (r) 789 1.1 riastrad return -ENOMEM; 790 1.1 riastrad 791 1.1 riastrad r = drm_gem_handle_create(file_priv, gobj, &handle); 792 1.1 riastrad /* drop reference from allocate - handle holds it now */ 793 1.9 riastrad drm_gem_object_put_unlocked(gobj); 794 1.1 riastrad if (r) { 795 1.1 riastrad return r; 796 1.1 riastrad } 797 1.1 riastrad args->handle = handle; 798 1.1 riastrad return 0; 799 1.1 riastrad } 800 1.1 riastrad 801 1.1 riastrad #if defined(CONFIG_DEBUG_FS) 802 1.1 riastrad static int radeon_debugfs_gem_info(struct seq_file *m, void *data) 803 1.1 riastrad { 804 1.1 riastrad struct drm_info_node *node = (struct drm_info_node *)m->private; 805 1.1 riastrad struct drm_device *dev = node->minor->dev; 806 1.1 riastrad struct radeon_device *rdev = dev->dev_private; 807 1.1 riastrad struct radeon_bo *rbo; 808 1.1 riastrad unsigned i = 0; 809 1.1 riastrad 810 1.1 riastrad mutex_lock(&rdev->gem.mutex); 811 1.1 riastrad list_for_each_entry(rbo, &rdev->gem.objects, list) { 812 1.1 riastrad unsigned domain; 813 1.1 riastrad const char *placement; 814 1.1 riastrad 815 1.1 riastrad domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type); 816 1.1 riastrad switch (domain) { 817 1.1 riastrad case RADEON_GEM_DOMAIN_VRAM: 818 1.1 riastrad placement = "VRAM"; 819 1.1 riastrad break; 820 1.1 riastrad case RADEON_GEM_DOMAIN_GTT: 821 1.1 riastrad placement = " GTT"; 822 1.1 riastrad break; 823 1.1 riastrad case RADEON_GEM_DOMAIN_CPU: 824 1.1 riastrad default: 825 1.1 riastrad placement = " CPU"; 826 1.1 riastrad break; 827 1.1 riastrad } 828 1.1 riastrad seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n", 829 1.1 riastrad i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20, 830 1.1 riastrad placement, (unsigned long)rbo->pid); 831 1.1 riastrad i++; 832 1.1 riastrad } 833 1.1 riastrad mutex_unlock(&rdev->gem.mutex); 834 1.1 riastrad return 0; 835 1.1 riastrad } 836 1.1 riastrad 837 1.1 riastrad static struct drm_info_list radeon_debugfs_gem_list[] = { 838 1.1 riastrad {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL}, 839 1.1 riastrad }; 840 1.1 riastrad #endif 841 1.1 riastrad 842 1.1 riastrad int radeon_gem_debugfs_init(struct radeon_device *rdev) 843 1.1 riastrad { 844 1.1 riastrad #if defined(CONFIG_DEBUG_FS) 845 1.1 riastrad return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1); 846 1.1 riastrad #endif 847 1.1 riastrad return 0; 848 1.1 riastrad } 849