/*	$NetBSD: nouveau_gem.c,v 1.1.1.2 2014/08/06 12:36:23 riastradh Exp $	*/

/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_gem.c,v 1.1.1.2 2014/08/06 12:36:23 riastradh Exp $");

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

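/*
 * nouveau_gem_object_del: GEM object destructor.  Tears down any PRIME
 * import attachment, releases the GEM object core, clears gem->filp so
 * nouveau_bo_del_ttm() can detect the GEM case, and drops the TTM
 * buffer-object reference backing the GEM object.
 */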
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}

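/*
 * nouveau_gem_object_open: called when a client gains a handle to the
 * object.  For clients with a per-client VM, look up or create the
 * nouveau_vma that maps the buffer into that VM, bumping its refcount
 * if it already exists.
 */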
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

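/*
 * Deferred VMA teardown: if the buffer is currently mapped into a VM and
 * still has a fence outstanding, nouveau_gem_object_unmap() schedules
 * nouveau_gem_object_delete() to run on fence completion; otherwise it
 * unmaps and frees the VMA immediately.
 */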
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}

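/*
 * nouveau_gem_object_close: called when a client drops a handle to the
 * object.  Drops the client's reference on the per-VM VMA and unmaps it
 * once the last reference goes away.
 */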
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

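/*
 * nouveau_gem_new: allocate a nouveau_bo with an embedded GEM object.
 * The requested GEM domains are translated into TTM placement flags,
 * and the caller receives a single GEM reference via *pnvbo.
 */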
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

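/*
 * nouveau_gem_info: fill in a drm_nouveau_gem_info reply for the given
 * object.  Reports the GART/VRAM domain, the offset (the per-client VM
 * address when the client has one), size, mmap handle and tiling state.
 */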
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

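/*
 * DRM_NOUVEAU_GEM_NEW: validate the requested tile flags, allocate the
 * buffer, create a handle for the caller and return the object info.
 * The allocation reference is dropped once the handle owns the object.
 */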
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

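/*
 * nouveau_gem_set_domain: choose a TTM placement for a pushbuf buffer.
 * The preferred placement keeps the buffer where it already sits when
 * that is compatible with the requested read/write domains, falling
 * back to VRAM and then GART.
 */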
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

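/*
 * Per-submission validation state: buffers are sorted onto one of three
 * lists according to their valid domains, and the ww_acquire_ctx ticket
 * covers all reservations taken while building those lists.
 */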
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};

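/*
 * validate_fini_list: for every buffer on the list, attach the new fence
 * (if any), drop any kmap left over from reloc patching, unreserve the
 * buffer and release the reference taken by validate_init().
 */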
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence, &op->ticket);
	validate_fini_list(&op->gart_list, fence, &op->ticket);
	validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}

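/*
 * validate_init: look up and reserve every buffer named in the pushbuf,
 * sorting each one onto the appropriate validate_op list.  Reservation
 * uses a ww_mutex ticket; on -EDEADLK everything reserved so far is
 * released and the contended buffer is re-acquired via the slowpath
 * before the whole loop is retried.
 */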
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}

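/*
 * validate_sync: synchronise the channel with the buffer's current fence
 * so that commands touching the buffer wait for its previous user.
 */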
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

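/*
 * validate_list: validate each buffer on one of the validate_op lists.
 * Sets the TTM placement from the requested domains, validates (possibly
 * moving) the buffer, synchronises with its previous fence and, on
 * pre-NV50 chips, writes back any changed presumed offset/domain so that
 * relocations get reapplied.  Returns the number of buffers whose
 * presumed state changed, or a negative error code.
 */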
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

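/*
 * nouveau_gem_pushbuf_validate: reserve and validate the complete buffer
 * list for a pushbuf submission, accumulating in *apply_relocs the number
 * of buffers whose presumed state changed and therefore need relocations
 * applied.
 */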
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

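/*
 * u_memcpya/u_free: copy a user-supplied array of nmemb elements of the
 * given size into a kernel buffer, falling back from kmalloc to vmalloc
 * for large requests; u_free() releases the buffer with the matching
 * allocator.  The size multiplication is not overflow-checked, so
 * callers bound nmemb first (the pushbuf ioctl checks its counts against
 * the NOUVEAU_GEM_MAX_* limits before copying anything in).
 */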
static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

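/*
 * nouveau_gem_pushbuf_reloc_apply: copy the relocation array in from
 * userspace and patch each target buffer.  Buffers whose presumed state
 * is still valid are skipped; otherwise the buffer is kmapped, the
 * relocation value is computed from the presumed offset and the
 * LOW/HIGH/OR flags, and written once the buffer is idle.
 */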
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

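/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main command submission ioctl.  Copies in
 * the push, buffer and relocation arrays, validates every buffer, applies
 * relocations if required, then emits the pushes using whichever
 * submission method the channel supports and fences the submission.
 */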
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

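	/*
	 * Submit the pushes.  IB-mode channels (nv50+) get each push
	 * appended to the indirect buffer ring; chipsets >= 0x25 emit a
	 * call into each push buffer; older chipsets emit a jump and
	 * patch a jump back at the tail of each push.
	 */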
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

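	/*
	 * Report the suffix words expected at the tail of each push buffer
	 * on the next submission: none for IB-mode channels, a call-return
	 * word on chipset >= 0x25, or a jump back into the channel's
	 * pushbuf otherwise (compare the cmd patching in the jump path
	 * above).
	 */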
out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

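/*
 * domain_to_ttm: translate NOUVEAU_GEM_DOMAIN_* flags into the
 * corresponding TTM placement flags.
 */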
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

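/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait (or poll, with NOWAIT) for the buffer
 * to become idle before the CPU accesses it.
 */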
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

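/*
 * DRM_NOUVEAU_GEM_CPU_FINI: currently a no-op.
 */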
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

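/*
 * DRM_NOUVEAU_GEM_INFO: look up a handle and return its object info.
 */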
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
    932