/*	$NetBSD: radeon_gem.c,v 1.4 2018/08/27 04:58:36 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_gem.c,v 1.4 2018/08/27 04:58:36 riastradh Exp $");

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifndef __NetBSD__		/* XXX drm prime */
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
#ifndef __NetBSD__
	robj->pid = task_pid_nr(current);
#endif

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
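
/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * caller allocates a page-aligned BO and, on success, owns the returned GEM
 * reference.  Note the VRAM fallback above: a VRAM request that fails is
 * silently retried with VRAM|GTT.
 *
 *	struct drm_gem_object *gobj;
 *	int r = radeon_gem_object_create(rdev, 1 << 20, PAGE_SIZE,
 *					 RADEON_GEM_DOMAIN_GTT, 0,
 *					 true, &gobj);
 *	if (r == 0)
 *		robj = gem_to_radeon_bo(gobj);
 */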

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
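
/*
 * Illustrative note (not part of the original file): -EDEADLK is how the
 * radeon fence code reports a wedged GPU.  After a successful reset the
 * helper converts it to -EAGAIN, so the ioctls below typically end with
 *
 *	r = radeon_gem_handle_lockup(rdev, r);
 *	return r;	// -EAGAIN asks userspace to retry the ioctl
 */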

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this buffer */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
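
/*
 * Illustrative userspace sketch (not part of this file; assumes the
 * standard libdrm wrapper drmCommandWriteRead and the radeon_drm.h
 * definitions):
 *
 *	struct drm_radeon_gem_create args = {
 *		.size = bo_size,	// rounded up to PAGE_SIZE above
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_VRAM,
 *	};
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		bo_handle = args.handle;	// names the new BO
 */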

int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this buffer */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}
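
/*
 * Illustrative userspace sketch (not part of this file; assumes libdrm).
 * Both address and size must be page aligned, and writable mappings
 * additionally require ANONONLY | REGISTER, as enforced above:
 *
 *	struct drm_radeon_gem_userptr args = {
 *		.addr = (uintptr_t)page_aligned_buf,
 *		.size = page_aligned_size,
 *		.flags = RADEON_GEM_USERPTR_ANONONLY |
 *			 RADEON_GEM_USERPTR_REGISTER |
 *			 RADEON_GEM_USERPTR_VALIDATE,
 *	};
 *	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
 *				&args, sizeof(args));
 */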

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now, if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	/* use rdev, not robj->rdev: robj may be gone after the unreference */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
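
/*
 * Illustrative userspace sketch (not part of this file): the ioctl only
 * returns a fake offset in addr_ptr; the actual mapping is obtained by
 * passing that offset to mmap(2) on the DRM fd:
 *
 *	struct drm_radeon_gem_mmap args = {
 *		.handle = bo_handle,
 *		.size = bo_size,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &args, sizeof(args));
 *	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, args.addr_ptr);
 */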

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
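
/*
 * Illustrative userspace sketch (not part of this file; assumes libdrm):
 * poll with DRM_RADEON_GEM_BUSY, or block with DRM_RADEON_GEM_WAIT_IDLE and
 * retry on -EBUSY (the 30 second reservation timeout above expired):
 *
 *	struct drm_radeon_gem_wait_idle wargs = { .handle = bo_handle };
 *	do {
 *		r = drmCommandWrite(fd, DRM_RADEON_GEM_WAIT_IDLE,
 *				    &wargs, sizeof(wargs));
 *	} while (r == -EBUSY);
 */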

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-0 value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
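
/*
 * Illustrative userspace sketch (not part of this file; assumes libdrm):
 * map a BO at a fixed GPU virtual address on a >= Cayman part.  vm_id must
 * stay 0 and the offset must lie above RADEON_VA_RESERVED_SIZE, as enforced
 * above:
 *
 *	struct drm_radeon_gem_va args = {
 *		.handle = bo_handle,
 *		.operation = RADEON_VA_MAP,
 *		.flags = RADEON_VM_PAGE_READABLE |
 *			 RADEON_VM_PAGE_WRITEABLE |
 *			 RADEON_VM_PAGE_SNOOPED,
 *		.offset = gpu_va,
 *	};
 *	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &args, sizeof(args));
 */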

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
#ifdef __NetBSD__		/* XXX ALIGN means something else.  */
	args->size = round_up(args->size, PAGE_SIZE);
#else
	args->size = ALIGN(args->size, PAGE_SIZE);
#endif

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
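
/*
 * Illustrative userspace sketch (not part of this file): KMS clients reach
 * this through the generic dumb-buffer ioctl rather than a radeon-specific
 * one; the driver fills in handle, pitch, and size above:
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024,
 *		.height = 768,
 *		.bpp = 32,
 *	};
 *	r = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	// on success, creq.handle / creq.pitch / creq.size are valid
 */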

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}
    828