/*	$NetBSD: nouveau_gem.c,v 1.12 2021/12/18 23:45:32 riastradh Exp $	*/

/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_gem.c,v 1.12 2021/12/18 23:45:32 riastradh Exp $");

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>

#include <linux/nbsd-namespace.h>

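/*
 * GEM object destructor: called when the last reference to the GEM
 * object is dropped.  Tears down any dma-buf import state, releases the
 * underlying TTM buffer object, and holds a runtime PM reference across
 * the teardown so the device is awake if the final unmap touches it.
 */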
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

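/*
 * Called when a client opens a handle to the GEM object.  On NV50 and
 * newer VMMs this creates (or takes another reference on) a per-client
 * VMA mapping of the buffer in that client's address space.
 */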
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES)
		goto out;

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

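/*
 * Unmap a per-client VMA.  If work may still be pending on the VMA (it
 * has a fence), defer the teardown to a nouveau_cli_work item that runs
 * once the fence signals; otherwise, or if the work item cannot be
 * allocated, wait and delete the VMA synchronously.
 */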
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

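/*
 * Called when a client closes its handle to the GEM object.  Drops the
 * client's reference on its VMA and unmaps the VMA once the last
 * reference is gone, waking the device for the unmap if necessary.
 */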
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

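/*
 * Allocate a new buffer object with an embedded GEM object.  The
 * requested GEM domains are translated into TTM placement flags, and the
 * single reference returned to the caller is the GEM reference rather
 * than a bare nouveau_bo/TTM reference.
 */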
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

#ifndef __NetBSD__	/* XXX Let TTM swap; skip GEM like radeon. */
	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
#endif
	*pnvbo = nvbo;
	return 0;
}

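/*
 * Fill in a drm_nouveau_gem_info reply for a buffer: its domain, GPU
 * offset (the per-client VMA address on NV50+), size, mmap handle, and
 * tiling state.
 */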
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&nvbo->bo.base);
	return ret;
}

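/*
 * Compute TTM placement for a pushbuf buffer from the domains the client
 * declared valid and the domains it will read or write.  Prefer keeping
 * the buffer where it already resides if that placement is acceptable.
 */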
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

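/*
 * Unwind the validation list: attach the submission fence (if any) to
 * each buffer and its VMA, drop any temporary kmaps, unreserve the
 * buffers, and release the GEM references taken by validate_init().
 */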
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put_unlocked(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

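/*
 * Look up and reserve every buffer on the pushbuf validation list under a
 * single ww_acquire_ctx.  On -EDEADLK the already-reserved buffers are
 * released, the contended buffer is reserved via the slow path, and the
 * whole list is walked again.  Buffers are bucketed by valid domain
 * (VRAM, GART, or both) and finally spliced onto op->list.
 */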
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;

}

#ifdef __NetBSD__	/* XXX yargleblargh */
#  define __force
#endif

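/*
 * For each reserved buffer: choose its placement from the requested
 * domains, validate it with TTM (migrating if necessary), and
 * synchronize against its existing fences on this channel.  On pre-Tesla
 * chips, also count how many buffers moved away from their presumed
 * offset so the caller knows whether relocations must be applied.
 */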
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}

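/*
 * Reserve and validate the full buffer list for a pushbuf submission.
 * Sets *apply_relocs when validate_list() reports that at least one
 * buffer no longer matches its presumed placement.
 */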
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

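/*
 * Copy an array of nmemb elements of the given size in from user space
 * into a kvmalloc'd kernel buffer.  Returns an ERR_PTR on allocation or
 * copy failure; the caller frees the result with u_free().
 */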
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

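/*
 * Apply user-space relocations: for every reloc whose target buffer's
 * presumed placement turned out to be stale, compute the new value (low
 * or high 32 bits of the buffer address, optionally OR'd with a
 * placement-dependent constant), wait for the containing buffer to go
 * idle, and patch the value in through a CPU mapping.
 */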
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

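/*
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl: copy in the push, buffer, and (lazily)
 * reloc arrays from user space, validate the buffer list, apply any
 * relocations, emit the pushes on the target channel (IB ring, call, or
 * jump method depending on the chipset), fence the submission, and write
 * updated presumed offsets back to user space.
 */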
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
		u_free(reloc);
	}
out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

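/*
 * DRM_NOUVEAU_GEM_CPU_PREP ioctl: wait (up to 30 seconds, or not at all
 * with NOWAIT) for outstanding GPU work on the buffer to complete, then
 * synchronize the backing pages for CPU access.
 */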
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put_unlocked(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put_unlocked(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put_unlocked(gem);
	return ret;
}