/* $NetBSD: nouveau_sgdma.c,v 1.1.1.2.32.1 2019/06/10 22:08:07 christos Exp $ */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_sgdma.c,v 1.1.1.2.32.1 2019/06/10 22:08:07 christos Exp $");

#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

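/*
 * Per-buffer-object TTM backend state for system-memory objects that are
 * mapped into the GPU's address space via scatter/gather DMA.
 */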
struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct nvkm_mem *node;
};

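/*
 * Destroy hook shared by both backends: finalize the DMA-aware ttm_dma_tt
 * state and free the wrapper allocated in nouveau_sgdma_create_ttm().
 */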
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

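/*
 * Bind hook for pre-Tesla chipsets (selected in nouveau_sgdma_create_ttm()
 * below).  Record either the scatterlist or the per-page DMA addresses in
 * the nvkm_mem node and map it into the GPU VM immediately.  node->size is
 * converted from pages to 4 KiB units, which appears to be what
 * nvkm_vm_map() expects: with 4 KiB system pages the shifts cancel out and
 * node->size simply equals mem->num_pages.
 */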
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = mem->mm_node;

	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

	nvkm_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

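/*
 * Undo nv04_sgdma_bind(): drop the GPU VM mapping of the node remembered
 * in nvbe->node.
 */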
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nvkm_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

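/* TTM backend hooks for the pre-Tesla path. */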
static struct ttm_backend_func nv04_sgdma_backend = {
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

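/*
 * Bind/unbind hooks for Tesla (NV50) and newer chipsets.  Here the actual
 * GPU VM map and unmap are driven from the driver's move_notify() hook, so
 * bind only records the scatterlist or DMA addresses and the size in the
 * node, and unbind has nothing to do.
 */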
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

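/*
 * Allocate and initialize the DMA-aware TTM for a buffer object: choose the
 * nv04 or nv50 backend based on the chipset family, then let
 * ttm_dma_tt_init() set up the page and DMA address arrays.  Presumably
 * reached through the ttm_tt_create hook installed in nouveau_bo.c.
 */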
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
		/*
		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
		 * to free nvbe here.
		 */
		return NULL;
	return &nvbe->ttm.ttm;
}