/*	$NetBSD: nouveau_gem.c,v 1.1.1.3 2018/08/27 01:34:55 riastradh Exp $	*/

/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_gem.c,v 1.1.1.3 2018/08/27 01:34:55 riastradh Exp $");

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

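/*
 * nouveau_gem_object_del - final destruction of a GEM-backed buffer object.
 *
 * Called once the last reference to the GEM object is dropped.  The device
 * is runtime-resumed for the teardown (-EACCES, returned when runtime PM
 * is disabled, is tolerated), any PRIME import attachment is detached, and
 * gem->filp is cleared so nouveau_bo_del_ttm() can tell the BO was
 * GEM-backed before the final TTM reference is dropped.
 */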
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

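/*
 * nouveau_gem_object_open - per-client open of a GEM handle.
 *
 * A no-op for clients without a per-client VM (cli->vm == NULL).
 * Otherwise, look up the BO's mapping in that VM and bump its refcount,
 * or allocate and map a fresh VMA.  Mapping into the VM touches the
 * hardware, so it is bracketed with runtime PM references.
 */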
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			kfree(vma);
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

static void
nouveau_gem_object_delete(void *data)
{
	struct nvkm_vma *vma = data;
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
	kfree(vma);
}

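/*
 * nouveau_gem_object_unmap - tear down one VMA mapping of a BO.
 *
 * If the BO is currently mapped (i.e. not in system memory) and carries a
 * single outstanding fence, the unmap/put/free is deferred to fence
 * completion via nouveau_fence_work().  With more than one shared fence
 * there is no single fence to hang the work on, so the BO is waited on
 * synchronously instead; otherwise the VMA is torn down immediately.
 */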
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		kfree(vma);
	}
}

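/*
 * nouveau_gem_object_close - per-client close of a GEM handle.
 *
 * Counterpart to nouveau_gem_object_open(): drops the VMA refcount taken
 * at open time, and on the last reference unmaps the VMA under a runtime
 * PM reference.
 */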
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

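/*
 * nouveau_gem_new - allocate a GEM-backed nouveau_bo.
 *
 * Translates the NOUVEAU_GEM_DOMAIN_* mask into TTM placement flags,
 * allocates the BO, then initializes the embedded GEM object.  The single
 * reference handed back to the caller is a GEM reference rather than a
 * bare nouveau_bo TTM reference.
 */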
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without breaking the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nvkm_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

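/*
 * nouveau_gem_ioctl_new - DRM_IOCTL_NOUVEAU_GEM_NEW handler.
 *
 * Validates the requested tile flags against the memory backend,
 * allocates the buffer, and wraps it in a handle for the caller.
 * Illustrative userspace usage (a sketch only; see the drm/nouveau uapi
 * header for the exact struct layout):
 *
 *	struct drm_nouveau_gem_new req = { };
 *	req.info.size   = 1 << 20;
 *	req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 *	if (drmIoctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req) == 0)
 *		... use req.info.handle ...
 *
 * On success the handle holds the only reference; the allocation
 * reference is dropped before returning.
 */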
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

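/*
 * nouveau_gem_set_domain - choose TTM placement for a pushbuf BO.
 *
 * Intersects the domains userspace will accept with the BO's valid
 * domains (write domains take priority over read domains), then prefers
 * the BO's current placement whenever it already satisfies the request,
 * to avoid needless migration.
 */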
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

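/*
 * validate_init - reserve every BO referenced by a pushbuf submission.
 *
 * All reservations happen under a single ww_acquire ticket.  On -EDEADLK
 * the already-reserved buffers are released, the contended BO is
 * re-reserved via the slowpath, and the whole list is retried.  Buffers
 * are queued on per-domain lists (vram/gart/both) and spliced onto
 * op->list in that fixed order; duplicate handles from the same client
 * are rejected.
 */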
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, NULL, NULL);
	return ret;
}

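/*
 * validate_list - validate placement of every reserved BO and synchronize
 * it against the channel.
 *
 * On pre-TESLA chips, any BO whose offset or domain no longer matches
 * what userspace presumed is written back through upbbo and counted; the
 * positive return value tells the caller how many entries changed, and
 * hence that relocations must be applied.
 */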
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

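/*
 * u_free() and u_memcpya() manage kernel copies of user-space arrays:
 * u_memcpya() copies an array in, trying kmalloc first and falling back
 * to vmalloc for large allocations, and u_free() releases either kind.
 * Callers are expected to bound nmemb beforehand (the pushbuf ioctl
 * checks nr_push, nr_buffers and nr_relocs against the NOUVEAU_GEM_MAX_*
 * limits), so the size multiplication is not expected to overflow.
 */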
static inline void
u_free(void *addr)
{
	kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

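/*
 * nouveau_gem_pushbuf_reloc_apply - patch presumed addresses into push
 * buffers.
 *
 * Copies the relocation array from userspace and, for each entry whose
 * presumed data went stale during validation, kmaps the containing BO,
 * waits for it to go idle, and rewrites the affected word with the low or
 * high half of the new address (optionally OR-ing in a placement-specific
 * value: r->tor for GART, r->vor for VRAM).
 */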
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

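/*
 * nouveau_gem_ioctl_pushbuf - DRM_IOCTL_NOUVEAU_GEM_PUSHBUF handler, the
 * main command submission path.
 *
 * Looks up the target channel, bounds-checks the push/buffer/reloc
 * counts, copies the arrays in, validates the buffer list, and applies
 * relocations if validation moved anything.  Submission then takes one of
 * three forms: indirect buffers (nv50_dma_push) when the channel has an
 * IB ring, the call method on >= nv25 chipsets, or, on older chips,
 * jumping into each push buffer after patching a jump-back command at its
 * end.  A fence is emitted to tie the validated BOs to completion, and
 * suffix0/suffix1 report the expected terminating commands back to
 * userspace.
 */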
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

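/*
 * nouveau_gem_ioctl_cpu_prep / nouveau_gem_ioctl_cpu_fini - bracket CPU
 * access to a BO.
 *
 * cpu_prep waits for outstanding fences on the BO's reservation object
 * (or merely polls when NOUVEAU_GEM_CPU_PREP_NOWAIT is set, with a 30s
 * timeout otherwise) and syncs the BO for CPU access; cpu_fini hands it
 * back to the device.
 */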
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (no_wait)
		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
	else {
		long lret;

		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;
	}
	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_unreference_unlocked(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_unreference_unlocked(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}