/* $NetBSD: drm_gem_cma_helper.c,v 1.9 2019/11/05 23:29:28 jmcneill Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill (at) invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_gem_cma_helper.c,v 1.9 2019/11/05 23:29:28 jmcneill Exp $");

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/bus_dma_hacks.h>

#include <uvm/uvm.h>

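/*
 * drm_gem_cma_create_internal --
 *
 *	Common allocation path for CMA GEM objects.  If an sg table is
 *	supplied (dma-buf import), the DMA segments are derived from it;
 *	otherwise backing memory is carved out of the device's CMA vmem
 *	pool when one exists, or allocated with bus_dmamem_alloc().  The
 *	memory is then mapped into KVA, loaded into a DMA map, and zeroed
 *	for freshly allocated objects.
 */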
static struct drm_gem_cma_object *
drm_gem_cma_create_internal(struct drm_device *ddev, size_t size,
    struct sg_table *sgt)
{
	struct drm_gem_cma_object *obj;
	int error, nsegs;

	obj = kmem_zalloc(sizeof(*obj), KM_SLEEP);
	obj->dmat = ddev->dmat;
	obj->dmasize = size;
	/* Record the imported sg table (if any) for the cleanup paths. */
	obj->sgt = sgt;

	if (sgt) {
		error = -drm_prime_sg_to_bus_dmamem(obj->dmat, obj->dmasegs, 1,
		    &nsegs, sgt);
	} else {
		if (ddev->cma_pool != NULL) {
			error = vmem_xalloc(ddev->cma_pool, obj->dmasize,
			    PAGE_SIZE, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
			    VM_BESTFIT | VM_NOSLEEP, &obj->vmem_addr);
			if (!error) {
				obj->vmem_pool = ddev->cma_pool;
				obj->dmasegs[0].ds_addr =
				    PHYS_TO_BUS_MEM(obj->dmat, obj->vmem_addr);
				obj->dmasegs[0].ds_len =
				    roundup(obj->dmasize, PAGE_SIZE);
				nsegs = 1;
			}
		}
		if (obj->vmem_pool == NULL) {
			error = bus_dmamem_alloc(obj->dmat, obj->dmasize,
			    PAGE_SIZE, 0, obj->dmasegs, 1, &nsegs,
			    BUS_DMA_WAITOK);
		}
	}
	if (error)
		goto failed;
	error = bus_dmamem_map(obj->dmat, obj->dmasegs, nsegs,
	    obj->dmasize, &obj->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_PREFETCHABLE);
	if (error)
		goto free;
	error = bus_dmamap_create(obj->dmat, obj->dmasize, 1,
	    obj->dmasize, 0, BUS_DMA_WAITOK, &obj->dmamap);
	if (error)
		goto unmap;
	error = bus_dmamap_load(obj->dmat, obj->dmamap, obj->vaddr,
	    obj->dmasize, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	if (!sgt)
		memset(obj->vaddr, 0, obj->dmasize);

	drm_gem_private_object_init(ddev, &obj->base, size);

	return obj;

destroy:
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
unmap:
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
free:
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, nsegs);
failed:
	kmem_free(obj, sizeof(*obj));

	return NULL;
}

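/*
 * drm_gem_cma_create --
 *
 *	Allocate a new CMA GEM object of the given size, backed by
 *	freshly allocated (not imported) DMA memory.
 */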
struct drm_gem_cma_object *
drm_gem_cma_create(struct drm_device *ddev, size_t size)
{

	return drm_gem_cma_create_internal(ddev, size, NULL);
}

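/*
 * drm_gem_cma_obj_free --
 *
 *	Tear down an object's DMA resources: unload and destroy the DMA
 *	map, unmap the KVA, and release the backing memory to wherever
 *	it came from (imported sg table, CMA vmem pool, or bus_dmamem).
 */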
static void
drm_gem_cma_obj_free(struct drm_gem_cma_object *obj)
{

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else if (obj->vmem_pool)
		vmem_xfree(obj->vmem_pool, obj->vmem_addr, obj->dmasize);
	else
		bus_dmamem_free(obj->dmat, obj->dmasegs, 1);
	kmem_free(obj, sizeof(*obj));
}

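/*
 * drm_gem_cma_free_object --
 *
 *	GEM free callback: drop the mmap offset, release the GEM object,
 *	and free the underlying CMA resources.
 */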
void
drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_free_mmap_offset(gem_obj);
	drm_gem_object_release(gem_obj);
	drm_gem_cma_obj_free(obj);
}

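/*
 * drm_gem_cma_dumb_create --
 *
 *	Dumb-buffer create handler: compute the pitch from the width and
 *	bytes per pixel, round the total size up to a page boundary,
 *	allocate a CMA object, and return a handle to it.
 */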
int
drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *ddev,
    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *obj;
	uint32_t handle;
	int error;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);
	args->handle = 0;

	obj = drm_gem_cma_create(ddev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	error = drm_gem_handle_create(file_priv, &obj->base, &handle);
	drm_gem_object_unreference_unlocked(&obj->base);
	if (error) {
		drm_gem_cma_obj_free(obj);
		return error;
	}

	args->handle = handle;

	return 0;
}

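/*
 * drm_gem_cma_dumb_map_offset --
 *
 *	Look up the object behind a dumb-buffer handle and return the
 *	fake mmap offset that userland should pass to mmap(2), creating
 *	the offset on first use.
 */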
int
drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, struct drm_device *ddev,
    uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem_obj;
	struct drm_gem_cma_object *obj;
	int error;

	gem_obj = drm_gem_object_lookup(ddev, file_priv, handle);
	if (gem_obj == NULL)
		return -ENOENT;

	obj = to_drm_gem_cma_obj(gem_obj);

	if (drm_vma_node_has_offset(&obj->base.vma_node) == 0) {
		error = drm_gem_create_mmap_offset(&obj->base);
		if (error)
			goto done;
	} else {
		error = 0;
	}

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

done:
	drm_gem_object_unreference_unlocked(&obj->base);

	return error;
}

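/*
 * drm_gem_cma_fault --
 *
 *	UVM pager fault handler for mmap'd CMA objects.  For each page
 *	in the faulting range, translate the object offset into a
 *	machine-dependent page cookie with bus_dmamem_mmap() and enter
 *	a direct mapping with pmap_enter().  If pmap_enter() fails with
 *	PMAP_CANFAIL, wait for memory and return ERESTART so the fault
 *	is retried.
 */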
static int
drm_gem_cma_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *gem_obj =
	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);
	off_t curr_offset;
	vaddr_t curr_va;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	vm_prot_t mapprot;

	if (UVM_ET_ISCOPYONWRITE(entry))
		return EIO;

	curr_offset = entry->offset + (vaddr - entry->start);
	curr_va = vaddr;

	retval = 0;
	for (lcv = 0; lcv < npages; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;
		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
		    curr_offset, access_type, BUS_DMA_PREFETCHABLE);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;

		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			pmap_update(ufi->orig_map->pmap);
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
			uvm_wait("drm_gem_cma_fault");
			return ERESTART;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);

	return retval;
}

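/*
 * Pager operations for CMA GEM objects mapped into userland; reference
 * and detach use the generic GEM pager hooks, faults are handled above.
 */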
const struct uvm_pagerops drm_gem_cma_uvm_ops = {
	.pgo_reference = drm_gem_pager_reference,
	.pgo_detach = drm_gem_pager_detach,
	.pgo_fault = drm_gem_cma_fault,
};

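/*
 * drm_gem_cma_prime_get_sg_table --
 *
 *	PRIME export: build an sg table describing the object's single
 *	DMA segment.
 */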
struct sg_table *
drm_gem_cma_prime_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return drm_prime_bus_dmamem_to_sg(obj->dmat, obj->dmasegs, 1);
}

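/*
 * drm_gem_cma_prime_import_sg_table --
 *
 *	PRIME import: wrap an externally provided sg table in a CMA GEM
 *	object of the corresponding size.
 */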
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *ddev,
    struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	size_t size = drm_prime_sg_size(sgt);
	struct drm_gem_cma_object *obj;

	obj = drm_gem_cma_create_internal(ddev, size, sgt);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	return &obj->base;
}

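/*
 * drm_gem_cma_prime_vmap --
 *
 *	PRIME vmap: the object is mapped into KVA for its whole lifetime
 *	at creation, so simply return the existing kernel virtual address.
 */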
void *
drm_gem_cma_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *obj = to_drm_gem_cma_obj(gem_obj);

	return obj->vaddr;
}

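/*
 * drm_gem_cma_prime_vunmap --
 *
 *	PRIME vunmap: nothing to undo since the mapping is permanent;
 *	just assert that the caller handed back the address we gave out.
 */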
void
drm_gem_cma_prime_vunmap(struct drm_gem_object *gem_obj, void *vaddr)
{
	struct drm_gem_cma_object *obj __diagused =
	    to_drm_gem_cma_obj(gem_obj);

	KASSERT(vaddr == obj->vaddr);
}