drm_prime.c revision 1.14 1 1.14 riastrad /* $NetBSD: drm_prime.c,v 1.14 2021/12/19 10:38:22 riastradh Exp $ */
2 1.2 riastrad
3 1.1 riastrad /*
4 1.1 riastrad * Copyright 2012 Red Hat
5 1.1 riastrad *
6 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a
7 1.1 riastrad * copy of this software and associated documentation files (the "Software"),
8 1.1 riastrad * to deal in the Software without restriction, including without limitation
9 1.1 riastrad * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 1.1 riastrad * and/or sell copies of the Software, and to permit persons to whom the
11 1.1 riastrad * Software is furnished to do so, subject to the following conditions:
12 1.1 riastrad *
13 1.1 riastrad * The above copyright notice and this permission notice (including the next
14 1.1 riastrad * paragraph) shall be included in all copies or substantial portions of the
15 1.1 riastrad * Software.
16 1.1 riastrad *
17 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 1.1 riastrad * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 1.1 riastrad * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 1.1 riastrad * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 1.1 riastrad * IN THE SOFTWARE.
24 1.1 riastrad *
25 1.1 riastrad * Authors:
26 1.1 riastrad * Dave Airlie <airlied (at) redhat.com>
27 1.1 riastrad * Rob Clark <rob.clark (at) linaro.org>
28 1.1 riastrad *
29 1.1 riastrad */
30 1.1 riastrad
31 1.2 riastrad #include <sys/cdefs.h>
32 1.14 riastrad __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.14 2021/12/19 10:38:22 riastradh Exp $");
33 1.2 riastrad
34 1.1 riastrad #include <linux/export.h>
35 1.1 riastrad #include <linux/dma-buf.h>
36 1.10 riastrad #include <linux/rbtree.h>
37 1.10 riastrad
38 1.10 riastrad #include <drm/drm.h>
39 1.10 riastrad #include <drm/drm_drv.h>
40 1.10 riastrad #include <drm/drm_file.h>
41 1.10 riastrad #include <drm/drm_framebuffer.h>
42 1.2 riastrad #include <drm/drm_gem.h>
43 1.10 riastrad #include <drm/drm_prime.h>
44 1.2 riastrad
45 1.2 riastrad #include "drm_internal.h"
46 1.1 riastrad
47 1.4 riastrad #ifdef __NetBSD__
48 1.4 riastrad
49 1.14 riastrad #include <sys/file.h>
50 1.14 riastrad
51 1.5 riastrad #include <drm/bus_dma_hacks.h>
52 1.5 riastrad
53 1.8 riastrad #include <linux/nbsd-namespace.h>
54 1.8 riastrad
55 1.4 riastrad /*
56 1.5 riastrad * We use struct sg_table just to pass around an array of pages from
57 1.5 riastrad * one device to another in drm prime. Since this is _not_ a complete
58 1.5 riastrad * implementation of Linux's sg table abstraction (e.g., it does not
59 1.5 riastrad * remember DMA addresses and RAM pages separately, and it doesn't
60 1.5 riastrad * support the nested chained iteration of Linux scatterlists), we
61 1.5 riastrad * isolate it to this file and make all callers go through a few extra
62 1.5 riastrad * subroutines (drm_prime_sg_size, drm_prime_sg_free, &c.) to use it.
63 1.5 riastrad * Don't use this outside drm prime!
64 1.4 riastrad */
65 1.4 riastrad
struct sg_table {
	paddr_t *sgt_pgs;	/* array of page physical addresses */
	unsigned sgt_npgs;	/* number of entries in sgt_pgs */
};
70 1.3 riastrad
71 1.3 riastrad static int
72 1.3 riastrad sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
73 1.3 riastrad unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
74 1.3 riastrad {
75 1.3 riastrad unsigned i;
76 1.3 riastrad
77 1.3 riastrad KASSERT(offset == 0);
78 1.3 riastrad KASSERT(size == npages << PAGE_SHIFT);
79 1.3 riastrad
80 1.5 riastrad sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
81 1.5 riastrad if (sgt->sgt_pgs == NULL)
82 1.3 riastrad return -ENOMEM;
83 1.5 riastrad sgt->sgt_npgs = npages;
84 1.3 riastrad
85 1.5 riastrad for (i = 0; i < npages; i++)
86 1.6 riastrad sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);
87 1.3 riastrad
88 1.3 riastrad return 0;
89 1.3 riastrad }
90 1.3 riastrad
91 1.3 riastrad static int
92 1.5 riastrad sg_alloc_table_from_pglist(struct sg_table *sgt, const struct pglist *pglist,
93 1.3 riastrad unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
94 1.3 riastrad {
95 1.3 riastrad struct vm_page *pg;
96 1.3 riastrad unsigned i;
97 1.3 riastrad
98 1.3 riastrad KASSERT(offset == 0);
99 1.3 riastrad KASSERT(size == npages << PAGE_SHIFT);
100 1.3 riastrad
101 1.5 riastrad sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
102 1.5 riastrad if (sgt->sgt_pgs == NULL)
103 1.3 riastrad return -ENOMEM;
104 1.5 riastrad sgt->sgt_npgs = npages;
105 1.3 riastrad
106 1.3 riastrad i = 0;
107 1.3 riastrad TAILQ_FOREACH(pg, pglist, pageq.queue) {
108 1.3 riastrad KASSERT(i < npages);
109 1.6 riastrad sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(pg);
110 1.3 riastrad }
111 1.3 riastrad KASSERT(i == npages);
112 1.3 riastrad
113 1.3 riastrad return 0;
114 1.3 riastrad }
115 1.3 riastrad
116 1.4 riastrad static int
117 1.5 riastrad sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
118 1.5 riastrad const bus_dma_segment_t *segs, int nsegs, gfp_t gfp)
119 1.4 riastrad {
120 1.5 riastrad int ret;
121 1.4 riastrad
122 1.4 riastrad KASSERT(nsegs > 0);
123 1.5 riastrad sgt->sgt_pgs = kcalloc(nsegs, sizeof(sgt->sgt_pgs[0]), gfp);
124 1.5 riastrad if (sgt->sgt_pgs == NULL)
125 1.4 riastrad return -ENOMEM;
126 1.5 riastrad sgt->sgt_npgs = nsegs;
127 1.4 riastrad
128 1.5 riastrad /* XXX errno NetBSD->Linux */
129 1.5 riastrad ret = -bus_dmamem_export_pages(dmat, segs, nsegs, sgt->sgt_pgs,
130 1.5 riastrad sgt->sgt_npgs);
131 1.5 riastrad if (ret)
132 1.5 riastrad return ret;
133 1.4 riastrad
134 1.4 riastrad return 0;
135 1.4 riastrad }
136 1.4 riastrad
137 1.3 riastrad static void
138 1.3 riastrad sg_free_table(struct sg_table *sgt)
139 1.3 riastrad {
140 1.3 riastrad
141 1.5 riastrad kfree(sgt->sgt_pgs);
142 1.5 riastrad sgt->sgt_pgs = NULL;
143 1.5 riastrad sgt->sgt_npgs = 0;
144 1.3 riastrad }
145 1.3 riastrad
146 1.4 riastrad #endif /* __NetBSD__ */
147 1.4 riastrad
148 1.10 riastrad /**
149 1.10 riastrad * DOC: overview and lifetime rules
150 1.1 riastrad *
151 1.10 riastrad * Similar to GEM global names, PRIME file descriptors are also used to share
152 1.10 riastrad * buffer objects across processes. They offer additional security: as file
153 1.10 riastrad * descriptors must be explicitly sent over UNIX domain sockets to be shared
154 1.10 riastrad * between applications, they can't be guessed like the globally unique GEM
155 1.10 riastrad * names.
156 1.10 riastrad *
157 1.10 riastrad * Drivers that support the PRIME API implement the
158 1.10 riastrad * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
159 1.10 riastrad * GEM based drivers must use drm_gem_prime_handle_to_fd() and
160 1.10 riastrad * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interface is provided through the &drm_gem_object_funcs.export
162 1.10 riastrad * and &drm_driver.gem_prime_import hooks.
163 1.10 riastrad *
164 1.10 riastrad * &dma_buf_ops implementations for GEM drivers are all individually exported
165 1.10 riastrad * for drivers which need to overwrite or reimplement some of them.
166 1.10 riastrad *
167 1.10 riastrad * Reference Counting for GEM Drivers
168 1.10 riastrad * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
169 1.10 riastrad *
170 1.10 riastrad * On the export the &dma_buf holds a reference to the exported buffer object,
171 1.10 riastrad * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
172 1.10 riastrad * IOCTL, when it first calls &drm_gem_object_funcs.export
173 1.10 riastrad * and stores the exporting GEM object in the &dma_buf.priv field. This
174 1.10 riastrad * reference needs to be released when the final reference to the &dma_buf
175 1.10 riastrad * itself is dropped and its &dma_buf_ops.release function is called. For
176 1.10 riastrad * GEM-based drivers, the &dma_buf should be exported using
177 1.10 riastrad * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
178 1.10 riastrad *
179 1.10 riastrad * Thus the chain of references always flows in one direction, avoiding loops:
180 1.10 riastrad * importing GEM object -> dma-buf -> exported GEM bo. A further complication
181 1.10 riastrad * are the lookup caches for import and export. These are required to guarantee
 * that any given object will always have only one unique userspace handle. This
183 1.10 riastrad * is required to allow userspace to detect duplicated imports, since some GEM
184 1.10 riastrad * drivers do fail command submissions if a given buffer object is listed more
185 1.10 riastrad * than once. These import and export caches in &drm_prime_file_private only
186 1.10 riastrad * retain a weak reference, which is cleaned up when the corresponding object is
187 1.10 riastrad * released.
188 1.10 riastrad *
189 1.10 riastrad * Self-importing: If userspace is using PRIME as a replacement for flink then
190 1.10 riastrad * it will get a fd->handle request for a GEM object that it created. Drivers
191 1.10 riastrad * should detect this situation and return back the underlying object from the
192 1.10 riastrad * dma-buf private. For GEM based drivers this is handled in
193 1.10 riastrad * drm_gem_prime_import() already.
194 1.1 riastrad */
195 1.1 riastrad
/*
 * One entry in the per-file PRIME lookup caches: associates a dma-buf
 * with the GEM handle it was imported to / exported from, indexed
 * both ways so either can be looked up from the other.
 */
struct drm_prime_member {
	struct dma_buf *dma_buf;	/* weak cache ref; +1 taken on insert */
	uint32_t handle;		/* GEM handle in the owning drm_file */

	struct rb_node dmabuf_rb;	/* linkage in prime.dmabufs tree */
	struct rb_node handle_rb;	/* linkage in prime.handles tree */
};
203 1.2 riastrad
204 1.11 riastrad #ifdef __NetBSD__
205 1.11 riastrad static int
206 1.11 riastrad compare_dmabufs(void *cookie, const void *va, const void *vb)
207 1.11 riastrad {
208 1.11 riastrad const struct drm_prime_member *ma = va;
209 1.11 riastrad const struct drm_prime_member *mb = vb;
210 1.11 riastrad
211 1.11 riastrad if (ma->dma_buf < mb->dma_buf)
212 1.11 riastrad return -1;
213 1.11 riastrad if (ma->dma_buf > mb->dma_buf)
214 1.11 riastrad return +1;
215 1.11 riastrad return 0;
216 1.11 riastrad }
217 1.11 riastrad
218 1.11 riastrad static int
219 1.11 riastrad compare_dmabuf_key(void *cookie, const void *vm, const void *vk)
220 1.11 riastrad {
221 1.11 riastrad const struct drm_prime_member *m = vm;
222 1.11 riastrad const struct dma_buf *const *kp = vk;
223 1.11 riastrad
224 1.11 riastrad if (m->dma_buf < *kp)
225 1.11 riastrad return -1;
226 1.11 riastrad if (m->dma_buf > *kp)
227 1.11 riastrad return +1;
228 1.11 riastrad return 0;
229 1.11 riastrad }
230 1.11 riastrad
231 1.11 riastrad static int
232 1.11 riastrad compare_handles(void *cookie, const void *va, const void *vb)
233 1.11 riastrad {
234 1.11 riastrad const struct drm_prime_member *ma = va;
235 1.11 riastrad const struct drm_prime_member *mb = vb;
236 1.11 riastrad
237 1.11 riastrad if (ma->handle < mb->handle)
238 1.11 riastrad return -1;
239 1.11 riastrad if (ma->handle > mb->handle)
240 1.11 riastrad return +1;
241 1.11 riastrad return 0;
242 1.11 riastrad }
243 1.11 riastrad
244 1.11 riastrad static int
245 1.11 riastrad compare_handle_key(void *cookie, const void *vm, const void *vk)
246 1.11 riastrad {
247 1.11 riastrad const struct drm_prime_member *m = vm;
248 1.11 riastrad const uint32_t *kp = vk;
249 1.11 riastrad
250 1.11 riastrad if (m->handle < *kp)
251 1.11 riastrad return -1;
252 1.11 riastrad if (m->handle > *kp)
253 1.11 riastrad return +1;
254 1.11 riastrad return 0;
255 1.11 riastrad }
256 1.11 riastrad
/* rb_tree(3) ops for the per-file cache keyed by dma-buf address. */
static const rb_tree_ops_t dmabuf_ops = {
	.rbto_compare_nodes = compare_dmabufs,
	.rbto_compare_key = compare_dmabuf_key,
	.rbto_node_offset = offsetof(struct drm_prime_member, dmabuf_rb),
};

/* rb_tree(3) ops for the per-file cache keyed by GEM handle. */
static const rb_tree_ops_t handle_ops = {
	.rbto_compare_nodes = compare_handles,
	.rbto_compare_key = compare_handle_key,
	.rbto_node_offset = offsetof(struct drm_prime_member, handle_rb),
};
268 1.11 riastrad #endif
269 1.11 riastrad
/*
 * Insert a (dma_buf, handle) pair into both per-file PRIME lookup
 * caches.  Takes a reference to dma_buf, dropped again when the entry
 * is removed.  Caller must hold prime_fpriv->lock.  Returns 0 on
 * success or -ENOMEM.
 */
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
#ifdef __NetBSD__
	struct drm_prime_member *collision __diagused;
#else
	struct rb_node **p, *rb;
#endif

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	/* Index the entry by dma-buf address. */
#ifdef __NetBSD__
	collision = rb_tree_insert_node(&prime_fpriv->dmabufs.rbr_tree,
	    member);
	KASSERT(collision == NULL);
#else
	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
#endif

	/* Index the same entry by GEM handle. */
#ifdef __NetBSD__
	collision = rb_tree_insert_node(&prime_fpriv->handles.rbr_tree,
	    member);
	KASSERT(collision == NULL);
#else
	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
#endif

	return 0;
}
332 1.2 riastrad
333 1.2 riastrad static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
334 1.2 riastrad uint32_t handle)
335 1.2 riastrad {
336 1.11 riastrad #ifdef __NetBSD__
337 1.11 riastrad return rb_tree_find_node(&prime_fpriv->handles.rbr_tree, &handle);
338 1.11 riastrad #else
339 1.10 riastrad struct rb_node *rb;
340 1.10 riastrad
341 1.10 riastrad rb = prime_fpriv->handles.rb_node;
342 1.10 riastrad while (rb) {
343 1.10 riastrad struct drm_prime_member *member;
344 1.2 riastrad
345 1.10 riastrad member = rb_entry(rb, struct drm_prime_member, handle_rb);
346 1.2 riastrad if (member->handle == handle)
347 1.2 riastrad return member->dma_buf;
348 1.10 riastrad else if (member->handle < handle)
349 1.10 riastrad rb = rb->rb_right;
350 1.10 riastrad else
351 1.10 riastrad rb = rb->rb_left;
352 1.2 riastrad }
353 1.2 riastrad
354 1.2 riastrad return NULL;
355 1.11 riastrad #endif
356 1.2 riastrad }
357 1.2 riastrad
358 1.2 riastrad static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
359 1.2 riastrad struct dma_buf *dma_buf,
360 1.2 riastrad uint32_t *handle)
361 1.2 riastrad {
362 1.11 riastrad #ifdef __NetBSD__
363 1.11 riastrad struct drm_prime_member *member;
364 1.11 riastrad
365 1.11 riastrad member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
366 1.11 riastrad if (member == NULL)
367 1.11 riastrad return -ENOENT;
368 1.11 riastrad *handle = member->handle;
369 1.11 riastrad return 0;
370 1.11 riastrad #else
371 1.10 riastrad struct rb_node *rb;
372 1.10 riastrad
373 1.10 riastrad rb = prime_fpriv->dmabufs.rb_node;
374 1.10 riastrad while (rb) {
375 1.10 riastrad struct drm_prime_member *member;
376 1.2 riastrad
377 1.10 riastrad member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
378 1.2 riastrad if (member->dma_buf == dma_buf) {
379 1.2 riastrad *handle = member->handle;
380 1.2 riastrad return 0;
381 1.10 riastrad } else if (member->dma_buf < dma_buf) {
382 1.10 riastrad rb = rb->rb_right;
383 1.10 riastrad } else {
384 1.10 riastrad rb = rb->rb_left;
385 1.2 riastrad }
386 1.2 riastrad }
387 1.10 riastrad
388 1.2 riastrad return -ENOENT;
389 1.11 riastrad #endif
390 1.2 riastrad }
391 1.2 riastrad
392 1.2 riastrad void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
393 1.2 riastrad struct dma_buf *dma_buf)
394 1.2 riastrad {
395 1.11 riastrad #ifdef __NetBSD__
396 1.11 riastrad struct drm_prime_member *member;
397 1.11 riastrad
398 1.11 riastrad member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
399 1.11 riastrad if (member != NULL) {
400 1.11 riastrad rb_tree_remove_node(&prime_fpriv->handles.rbr_tree, member);
401 1.11 riastrad rb_tree_remove_node(&prime_fpriv->dmabufs.rbr_tree, member);
402 1.11 riastrad }
403 1.11 riastrad #else
404 1.10 riastrad struct rb_node *rb;
405 1.2 riastrad
406 1.10 riastrad rb = prime_fpriv->dmabufs.rb_node;
407 1.10 riastrad while (rb) {
408 1.10 riastrad struct drm_prime_member *member;
409 1.10 riastrad
410 1.10 riastrad member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
411 1.2 riastrad if (member->dma_buf == dma_buf) {
412 1.10 riastrad rb_erase(&member->handle_rb, &prime_fpriv->handles);
413 1.10 riastrad rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
414 1.10 riastrad
415 1.2 riastrad dma_buf_put(dma_buf);
416 1.2 riastrad kfree(member);
417 1.10 riastrad return;
418 1.10 riastrad } else if (member->dma_buf < dma_buf) {
419 1.10 riastrad rb = rb->rb_right;
420 1.10 riastrad } else {
421 1.10 riastrad rb = rb->rb_left;
422 1.2 riastrad }
423 1.2 riastrad }
424 1.11 riastrad #endif
425 1.2 riastrad }
426 1.2 riastrad
427 1.10 riastrad void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
428 1.2 riastrad {
429 1.11 riastrad #ifdef __NetBSD__
430 1.11 riastrad linux_mutex_init(&prime_fpriv->lock);
431 1.11 riastrad #else
432 1.10 riastrad mutex_init(&prime_fpriv->lock);
433 1.11 riastrad #endif
434 1.11 riastrad #ifdef __NetBSD__
435 1.11 riastrad rb_tree_init(&prime_fpriv->dmabufs.rbr_tree, &dmabuf_ops);
436 1.11 riastrad rb_tree_init(&prime_fpriv->handles.rbr_tree, &handle_ops);
437 1.11 riastrad #else
438 1.10 riastrad prime_fpriv->dmabufs = RB_ROOT;
439 1.10 riastrad prime_fpriv->handles = RB_ROOT;
440 1.11 riastrad #endif
441 1.10 riastrad }
442 1.2 riastrad
/*
 * Tear down the per-file PRIME state.  The caches must already be
 * empty; drm_gem_release is responsible for emptying them first.
 */
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
#ifdef __NetBSD__ /* XXX post-merge linux doesn't destroy its lock now? */
	linux_mutex_destroy(&prime_fpriv->lock);
#endif
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
451 1.2 riastrad
452 1.10 riastrad /**
453 1.10 riastrad * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
454 1.10 riastrad * @dev: parent device for the exported dmabuf
455 1.10 riastrad * @exp_info: the export information used by dma_buf_export()
456 1.10 riastrad *
457 1.10 riastrad * This wraps dma_buf_export() for use by generic GEM drivers that are using
458 1.10 riastrad * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
459 1.10 riastrad * a reference to the &drm_device and the exported &drm_gem_object (stored in
460 1.10 riastrad * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
461 1.10 riastrad *
462 1.10 riastrad * Returns the new dmabuf.
463 1.10 riastrad */
464 1.10 riastrad struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
465 1.10 riastrad struct dma_buf_export_info *exp_info)
466 1.10 riastrad {
467 1.10 riastrad struct drm_gem_object *obj = exp_info->priv;
468 1.10 riastrad struct dma_buf *dma_buf;
469 1.2 riastrad
470 1.10 riastrad dma_buf = dma_buf_export(exp_info);
471 1.10 riastrad if (IS_ERR(dma_buf))
472 1.10 riastrad return dma_buf;
473 1.2 riastrad
474 1.10 riastrad drm_dev_get(dev);
475 1.10 riastrad drm_gem_object_get(obj);
476 1.14 riastrad #ifndef __NetBSD__ /* XXX dmabuf share */
477 1.10 riastrad dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
478 1.14 riastrad #endif
479 1.2 riastrad
480 1.10 riastrad return dma_buf;
481 1.2 riastrad }
482 1.10 riastrad EXPORT_SYMBOL(drm_gem_dmabuf_export);
483 1.2 riastrad
484 1.2 riastrad /**
485 1.10 riastrad * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
486 1.2 riastrad * @dma_buf: buffer to be released
487 1.2 riastrad *
488 1.2 riastrad * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
489 1.10 riastrad * must use this in their &dma_buf_ops structure as the release callback.
490 1.10 riastrad * drm_gem_dmabuf_release() should be used in conjunction with
491 1.10 riastrad * drm_gem_dmabuf_export().
492 1.2 riastrad */
493 1.2 riastrad void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
494 1.2 riastrad {
495 1.2 riastrad struct drm_gem_object *obj = dma_buf->priv;
496 1.10 riastrad struct drm_device *dev = obj->dev;
497 1.2 riastrad
498 1.2 riastrad /* drop the reference on the export fd holds */
499 1.10 riastrad drm_gem_object_put_unlocked(obj);
500 1.10 riastrad
501 1.10 riastrad drm_dev_put(dev);
502 1.2 riastrad }
503 1.2 riastrad EXPORT_SYMBOL(drm_gem_dmabuf_release);
504 1.2 riastrad
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual importing of GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	/* Resolve the fd to a dma-buf; takes a temporary reference. */
	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	/* Fast path: already imported by this file, reuse the handle. */
	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	/* Cache the dma-buf on the object for later re-export. */
	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	/* Record the pairing so a second import returns the same handle. */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	/* Drop the temporary dma_buf_get() reference. */
	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
588 1.2 riastrad
589 1.10 riastrad int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
590 1.10 riastrad struct drm_file *file_priv)
591 1.2 riastrad {
592 1.10 riastrad struct drm_prime_handle *args = data;
593 1.2 riastrad
594 1.10 riastrad if (!dev->driver->prime_fd_to_handle)
595 1.10 riastrad return -ENOSYS;
596 1.2 riastrad
597 1.10 riastrad return dev->driver->prime_fd_to_handle(dev, file_priv,
598 1.10 riastrad args->fd, &args->handle);
599 1.2 riastrad }
600 1.2 riastrad
601 1.2 riastrad static struct dma_buf *export_and_register_object(struct drm_device *dev,
602 1.2 riastrad struct drm_gem_object *obj,
603 1.2 riastrad uint32_t flags)
604 1.2 riastrad {
605 1.2 riastrad struct dma_buf *dmabuf;
606 1.2 riastrad
607 1.2 riastrad /* prevent races with concurrent gem_close. */
608 1.2 riastrad if (obj->handle_count == 0) {
609 1.2 riastrad dmabuf = ERR_PTR(-ENOENT);
610 1.2 riastrad return dmabuf;
611 1.2 riastrad }
612 1.2 riastrad
613 1.10 riastrad if (obj->funcs && obj->funcs->export)
614 1.10 riastrad dmabuf = obj->funcs->export(obj, flags);
615 1.10 riastrad else if (dev->driver->gem_prime_export)
616 1.10 riastrad dmabuf = dev->driver->gem_prime_export(obj, flags);
617 1.10 riastrad else
618 1.10 riastrad dmabuf = drm_gem_prime_export(obj, flags);
619 1.2 riastrad if (IS_ERR(dmabuf)) {
620 1.2 riastrad /* normally the created dma-buf takes ownership of the ref,
621 1.2 riastrad * but if that fails then drop the ref
622 1.2 riastrad */
623 1.2 riastrad return dmabuf;
624 1.2 riastrad }
625 1.2 riastrad
626 1.2 riastrad /*
627 1.2 riastrad * Note that callers do not need to clean up the export cache
628 1.2 riastrad * since the check for obj->handle_count guarantees that someone
629 1.2 riastrad * will clean it up.
630 1.2 riastrad */
631 1.2 riastrad obj->dma_buf = dmabuf;
632 1.2 riastrad get_dma_buf(obj->dma_buf);
633 1.2 riastrad
634 1.2 riastrad return dmabuf;
635 1.2 riastrad }
636 1.2 riastrad
/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the create dma-buf
 *
 * This is the PRIME export function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* Fast path: this handle was exported before, reuse the dmabuf. */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	/* Already exported (by another file): reuse the cached dmabuf. */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	/* First export of this object: create and cache a new dmabuf. */
	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
736 1.2 riastrad
737 1.10 riastrad int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
738 1.10 riastrad struct drm_file *file_priv)
739 1.10 riastrad {
740 1.10 riastrad struct drm_prime_handle *args = data;
741 1.10 riastrad
742 1.10 riastrad if (!dev->driver->prime_handle_to_fd)
743 1.10 riastrad return -ENOSYS;
744 1.10 riastrad
745 1.10 riastrad /* check flags are valid */
746 1.10 riastrad if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
747 1.10 riastrad return -EINVAL;
748 1.10 riastrad
749 1.10 riastrad return dev->driver->prime_handle_to_fd(dev, file_priv,
750 1.10 riastrad args->handle, args->flags, &args->fd);
751 1.10 riastrad }
752 1.10 riastrad
753 1.10 riastrad /**
754 1.10 riastrad * DOC: PRIME Helpers
755 1.10 riastrad *
756 1.10 riastrad * Drivers can implement &drm_gem_object_funcs.export and
757 1.10 riastrad * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
758 1.10 riastrad * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
759 1.10 riastrad * implement dma-buf support in terms of some lower-level helpers, which are
760 1.10 riastrad * again exported for drivers to use individually:
761 1.10 riastrad *
762 1.10 riastrad * Exporting buffers
763 1.10 riastrad * ~~~~~~~~~~~~~~~~~
764 1.10 riastrad *
765 1.10 riastrad * Optional pinning of buffers is handled at dma-buf attach and detach time in
766 1.10 riastrad * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
767 1.10 riastrad * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
768 1.10 riastrad * &drm_gem_object_funcs.get_sg_table.
769 1.10 riastrad *
770 1.10 riastrad * For kernel-internal access there's drm_gem_dmabuf_vmap() and
771 1.10 riastrad * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
772 1.10 riastrad * drm_gem_dmabuf_mmap().
773 1.10 riastrad *
774 1.10 riastrad * Note that these export helpers can only be used if the underlying backing
775 1.10 riastrad * storage is fully coherent and either permanently pinned, or it is safe to pin
776 1.10 riastrad * it indefinitely.
777 1.10 riastrad *
778 1.10 riastrad * FIXME: The underlying helper functions are named rather inconsistently.
779 1.10 riastrad *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
782 1.10 riastrad *
783 1.10 riastrad * Importing dma-bufs using drm_gem_prime_import() relies on
784 1.10 riastrad * &drm_driver.gem_prime_import_sg_table.
785 1.10 riastrad *
786 1.10 riastrad * Note that similarly to the export helpers this permanently pins the
787 1.10 riastrad * underlying backing storage. Which is ok for scanout, but is not the best
788 1.10 riastrad * option for sharing lots of buffers for rendering.
789 1.10 riastrad */
790 1.10 riastrad
791 1.2 riastrad /**
792 1.10 riastrad * drm_gem_map_attach - dma_buf attach implementation for GEM
793 1.10 riastrad * @dma_buf: buffer to attach device to
794 1.10 riastrad * @attach: buffer attachment data
795 1.10 riastrad *
796 1.10 riastrad * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
797 1.10 riastrad * used as the &dma_buf_ops.attach callback. Must be used together with
798 1.10 riastrad * drm_gem_map_detach().
799 1.2 riastrad *
800 1.10 riastrad * Returns 0 on success, negative error code on failure.
801 1.10 riastrad */
802 1.10 riastrad int drm_gem_map_attach(struct dma_buf *dma_buf,
803 1.10 riastrad struct dma_buf_attachment *attach)
804 1.10 riastrad {
805 1.10 riastrad struct drm_gem_object *obj = dma_buf->priv;
806 1.10 riastrad
807 1.10 riastrad return drm_gem_pin(obj);
808 1.10 riastrad }
809 1.10 riastrad EXPORT_SYMBOL(drm_gem_map_attach);
810 1.10 riastrad
/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling, undoing
 * the pin taken by drm_gem_map_attach(). Cleans up &dma_buf_attachment from
 * drm_gem_map_attach(). This can be used as the &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
828 1.10 riastrad
829 1.10 riastrad /**
830 1.10 riastrad * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
831 1.10 riastrad * @attach: attachment whose scatterlist is to be returned
832 1.10 riastrad * @dir: direction of DMA transfer
833 1.10 riastrad *
834 1.10 riastrad * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
835 1.10 riastrad * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
836 1.10 riastrad * with drm_gem_unmap_dma_buf().
837 1.10 riastrad *
838 1.10 riastrad * Returns:sg_table containing the scatterlist to be returned; returns ERR_PTR
839 1.10 riastrad * on error. May return -EINTR if it is interrupted by a signal.
840 1.10 riastrad */
841 1.10 riastrad struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
842 1.10 riastrad enum dma_data_direction dir)
843 1.10 riastrad {
844 1.10 riastrad struct drm_gem_object *obj = attach->dmabuf->priv;
845 1.10 riastrad struct sg_table *sgt;
846 1.2 riastrad
847 1.10 riastrad if (WARN_ON(dir == DMA_NONE))
848 1.2 riastrad return ERR_PTR(-EINVAL);
849 1.2 riastrad
850 1.10 riastrad if (obj->funcs)
851 1.10 riastrad sgt = obj->funcs->get_sg_table(obj);
852 1.10 riastrad else
853 1.10 riastrad sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
854 1.2 riastrad
855 1.14 riastrad #ifndef __NetBSD__ /* We map/unmap elsewhere. */
856 1.10 riastrad if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
857 1.10 riastrad DMA_ATTR_SKIP_CPU_SYNC)) {
858 1.10 riastrad sg_free_table(sgt);
859 1.10 riastrad kfree(sgt);
860 1.10 riastrad sgt = ERR_PTR(-ENOMEM);
861 1.10 riastrad }
862 1.14 riastrad #endif
863 1.2 riastrad
864 1.10 riastrad return sgt;
865 1.10 riastrad }
866 1.10 riastrad EXPORT_SYMBOL(drm_gem_map_dma_buf);
867 1.2 riastrad
/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap, may be NULL
 * @dir: direction of DMA transfer
 *
 * Unmaps and frees a scatter-gather table produced by drm_gem_map_dma_buf().
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	/* Nothing to undo for a NULL table. */
	if (!sgt)
		return;

#ifndef __NetBSD__ /* We map/unmap elsewhere. */
	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
#endif
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
891 1.2 riastrad
892 1.10 riastrad /**
893 1.10 riastrad * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
894 1.10 riastrad * @dma_buf: buffer to be mapped
895 1.10 riastrad *
896 1.10 riastrad * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
897 1.10 riastrad * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
898 1.10 riastrad *
899 1.10 riastrad * Returns the kernel virtual address or NULL on failure.
900 1.10 riastrad */
901 1.10 riastrad void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
902 1.10 riastrad {
903 1.10 riastrad struct drm_gem_object *obj = dma_buf->priv;
904 1.10 riastrad void *vaddr;
905 1.2 riastrad
906 1.10 riastrad vaddr = drm_gem_vmap(obj);
907 1.10 riastrad if (IS_ERR(vaddr))
908 1.10 riastrad vaddr = NULL;
909 1.2 riastrad
910 1.10 riastrad return vaddr;
911 1.1 riastrad }
912 1.10 riastrad EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
913 1.1 riastrad
914 1.2 riastrad /**
915 1.10 riastrad * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
916 1.10 riastrad * @dma_buf: buffer to be unmapped
917 1.10 riastrad * @vaddr: the virtual address of the buffer
918 1.2 riastrad *
919 1.10 riastrad * Releases a kernel virtual mapping. This can be used as the
920 1.10 riastrad * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
921 1.2 riastrad */
922 1.10 riastrad void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
923 1.1 riastrad {
924 1.10 riastrad struct drm_gem_object *obj = dma_buf->priv;
925 1.1 riastrad
926 1.10 riastrad drm_gem_vunmap(obj, vaddr);
927 1.10 riastrad }
928 1.10 riastrad EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
929 1.1 riastrad
/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * On NetBSD the Linux vm_area_struct parameter is replaced by the UVM mmap
 * parameter list (offset, size, protection, uvm_object out-parameter, etc.).
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
#ifdef __NetBSD__
int drm_gem_prime_mmap(struct drm_gem_object *obj, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
#endif
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
#ifdef __NetBSD__
	*offp += drm_vma_node_start(&obj->vma_node);
#else
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
#endif

	/* Fast path: the object supplies its own mmap hook. */
	if (obj->funcs && obj->funcs->mmap) {
#ifdef __NetBSD__
		ret = obj->funcs->mmap(obj, offp, size, prot, flagsp, advicep,
		    uobjp, maxprotp);
#else
		ret = obj->funcs->mmap(obj, vma);
#endif
		if (ret)
			return ret;
#ifndef __NetBSD__
		vma->vm_private_data = obj;
#endif
		/* The mapping holds a reference; dropped on unmap. */
		drm_gem_object_get(obj);
		return 0;
	}

	/*
	 * Fallback: synthesize a transient drm_file/file pair so the
	 * driver's regular mmap path can look up the GEM object.
	 */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
#ifdef __NetBSD__
	fil->f_data = priv;
#else
	fil->private_data = priv;
#endif

	/* Temporarily authorize this fake file on the object's vma node. */
	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

#ifdef __NetBSD__
	/*
	 * NOTE(review): *offp is passed both by value and by pointer here;
	 * presumably mmap_object rewrites the offset -- confirm against the
	 * drivers' mmap_object implementations.
	 */
	ret = obj->dev->driver->mmap_object(obj->dev, *offp, size, prot, uobjp,
	    offp, fil);
#else
	ret = obj->dev->driver->fops->mmap(fil, vma);
#endif

	/* Revoke unconditionally; the mapping itself keeps the object alive. */
	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	/* kfree(NULL) is a no-op, so partial allocation is fine here. */
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);
1011 1.1 riastrad
/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * On NetBSD the vm_area_struct parameter is replaced by the UVM mmap
 * parameter list, forwarded verbatim to the driver hook.
 *
 * FIXME: There's really no point to this wrapper, drivers which need anything
 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
#ifdef __NetBSD__
int
drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
#endif
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* Mapping PRIME buffers is optional for drivers. */
	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

#ifdef __NetBSD__
	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
	    advicep, uobjp, maxprotp);
#else
	return dev->driver->gem_prime_mmap(obj, vma);
#endif
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
1049 1.1 riastrad
/*
 * Default &dma_buf_ops installed by drm_gem_prime_export() below; each
 * hook forwards to the corresponding drm_gem_* helper in this file.
 */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
1061 1.1 riastrad
1062 1.2 riastrad /**
1063 1.2 riastrad * drm_prime_pages_to_sg - converts a page array into an sg list
1064 1.2 riastrad * @pages: pointer to the array of page pointers to convert
1065 1.2 riastrad * @nr_pages: length of the page vector
1066 1.1 riastrad *
1067 1.2 riastrad * This helper creates an sg table object from a set of pages
1068 1.1 riastrad * the driver is responsible for mapping the pages into the
1069 1.2 riastrad * importers address space for use with dma_buf itself.
1070 1.10 riastrad *
1071 1.10 riastrad * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
1072 1.1 riastrad */
1073 1.2 riastrad struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
1074 1.1 riastrad {
1075 1.1 riastrad struct sg_table *sg = NULL;
1076 1.1 riastrad int ret;
1077 1.1 riastrad
1078 1.1 riastrad sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
1079 1.2 riastrad if (!sg) {
1080 1.2 riastrad ret = -ENOMEM;
1081 1.1 riastrad goto out;
1082 1.2 riastrad }
1083 1.1 riastrad
1084 1.2 riastrad ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
1085 1.2 riastrad nr_pages << PAGE_SHIFT, GFP_KERNEL);
1086 1.1 riastrad if (ret)
1087 1.1 riastrad goto out;
1088 1.1 riastrad
1089 1.1 riastrad return sg;
1090 1.1 riastrad out:
1091 1.1 riastrad kfree(sg);
1092 1.2 riastrad return ERR_PTR(ret);
1093 1.1 riastrad }
1094 1.1 riastrad EXPORT_SYMBOL(drm_prime_pages_to_sg);
1095 1.1 riastrad
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
 * using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 *
 * Returns the new dma-buf, or an ERR_PTR from drm_gem_dmabuf_export().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
#ifndef __NetBSD__
		/* presumably absent from the NetBSD compat struct -- confirm */
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
#endif
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,	/* share the object's reservation */
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
1124 1.10 riastrad
/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev for
 * attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 *
 * Returns the (possibly pre-existing) GEM object, or an ERR_PTR on failure.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	/* Self-import short-circuit: recognize our own exported dma-bufs. */
	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Hold a dma-buf reference for the lifetime of the import. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	/* Recorded so drm_prime_gem_destroy() can undo the attachment. */
	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
1195 1.10 riastrad
1196 1.10 riastrad /**
1197 1.10 riastrad * drm_gem_prime_import - helper library implementation of the import callback
1198 1.10 riastrad * @dev: drm_device to import into
1199 1.10 riastrad * @dma_buf: dma-buf object to import
1200 1.10 riastrad *
1201 1.10 riastrad * This is the implementation of the gem_prime_import functions for GEM drivers
1202 1.10 riastrad * using the PRIME helpers. Drivers can use this as their
1203 1.10 riastrad * &drm_driver.gem_prime_import implementation. It is used as the default
1204 1.10 riastrad * implementation in drm_gem_prime_fd_to_handle().
1205 1.10 riastrad *
1206 1.10 riastrad * Drivers must arrange to call drm_prime_gem_destroy() from their
1207 1.10 riastrad * &drm_gem_object_funcs.free hook when using this function.
1208 1.10 riastrad */
1209 1.10 riastrad struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1210 1.10 riastrad struct dma_buf *dma_buf)
1211 1.10 riastrad {
1212 1.10 riastrad return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
1213 1.10 riastrad }
1214 1.10 riastrad EXPORT_SYMBOL(drm_gem_prime_import);
1215 1.10 riastrad
1216 1.12 riastrad #ifdef __NetBSD__
1217 1.3 riastrad
1218 1.3 riastrad struct sg_table *
1219 1.5 riastrad drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
1220 1.5 riastrad int nsegs)
1221 1.4 riastrad {
1222 1.4 riastrad struct sg_table *sg;
1223 1.4 riastrad int ret;
1224 1.4 riastrad
1225 1.4 riastrad sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1226 1.4 riastrad if (sg == NULL) {
1227 1.4 riastrad ret = -ENOMEM;
1228 1.4 riastrad goto out;
1229 1.4 riastrad }
1230 1.4 riastrad
1231 1.5 riastrad ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
1232 1.5 riastrad GFP_KERNEL);
1233 1.4 riastrad if (ret)
1234 1.4 riastrad goto out;
1235 1.4 riastrad
1236 1.4 riastrad return sg;
1237 1.4 riastrad out:
1238 1.4 riastrad kfree(sg);
1239 1.4 riastrad return ERR_PTR(ret);
1240 1.4 riastrad }
1241 1.4 riastrad
1242 1.4 riastrad struct sg_table *
1243 1.3 riastrad drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
1244 1.3 riastrad {
1245 1.3 riastrad struct sg_table *sg;
1246 1.3 riastrad int ret;
1247 1.3 riastrad
1248 1.3 riastrad sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1249 1.3 riastrad if (sg == NULL) {
1250 1.3 riastrad ret = -ENOMEM;
1251 1.3 riastrad goto out;
1252 1.3 riastrad }
1253 1.3 riastrad
1254 1.3 riastrad ret = sg_alloc_table_from_pglist(sg, pglist, 0, npages << PAGE_SHIFT,
1255 1.3 riastrad npages, GFP_KERNEL);
1256 1.3 riastrad if (ret)
1257 1.3 riastrad goto out;
1258 1.3 riastrad
1259 1.3 riastrad return sg;
1260 1.3 riastrad
1261 1.3 riastrad out:
1262 1.3 riastrad kfree(sg);
1263 1.3 riastrad return ERR_PTR(ret);
1264 1.3 riastrad }
1265 1.3 riastrad
1266 1.4 riastrad bus_size_t
1267 1.4 riastrad drm_prime_sg_size(struct sg_table *sg)
1268 1.4 riastrad {
1269 1.4 riastrad
1270 1.5 riastrad return sg->sgt_npgs << PAGE_SHIFT;
1271 1.4 riastrad }
1272 1.4 riastrad
/*
 * drm_prime_sg_free: Release an sg_table produced by one of the
 * drm_prime_*_to_sg helpers, including the table allocation itself.
 */
void
drm_prime_sg_free(struct sg_table *sg)
{

	sg_free_table(sg);
	kfree(sg);
}
1280 1.3 riastrad
/*
 * drm_prime_sg_to_bus_dmamem: Import the pages of sgt into at most nsegs
 * bus_dma segments in segs; the number actually used is stored in *rsegs.
 * Returns 0 on success or a negative Linux-style error.
 */
int
drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, const struct sg_table *sgt)
{

	/* XXX errno NetBSD->Linux */
	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs, sgt->sgt_pgs,
	    sgt->sgt_npgs);
}
1290 1.3 riastrad
1291 1.4 riastrad int
1292 1.5 riastrad drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
1293 1.5 riastrad struct sg_table *sgt)
1294 1.4 riastrad {
1295 1.5 riastrad bus_dma_segment_t *segs;
1296 1.5 riastrad bus_size_t size = drm_prime_sg_size(sgt);
1297 1.5 riastrad int nsegs = sgt->sgt_npgs;
1298 1.5 riastrad int ret;
1299 1.5 riastrad
1300 1.5 riastrad segs = kcalloc(sgt->sgt_npgs, sizeof(segs[0]), GFP_KERNEL);
1301 1.5 riastrad if (segs == NULL) {
1302 1.5 riastrad ret = -ENOMEM;
1303 1.5 riastrad goto out0;
1304 1.5 riastrad }
1305 1.5 riastrad
1306 1.5 riastrad ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
1307 1.5 riastrad if (ret)
1308 1.5 riastrad goto out1;
1309 1.5 riastrad KASSERT(nsegs <= sgt->sgt_npgs);
1310 1.5 riastrad
1311 1.5 riastrad /* XXX errno NetBSD->Linux */
1312 1.5 riastrad ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
1313 1.5 riastrad BUS_DMA_NOWAIT);
1314 1.5 riastrad if (ret)
1315 1.5 riastrad goto out1;
1316 1.4 riastrad
1317 1.5 riastrad out1: kfree(segs);
1318 1.5 riastrad out0: return ret;
1319 1.4 riastrad }
1320 1.4 riastrad
1321 1.7 riastrad bool
1322 1.7 riastrad drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
1323 1.7 riastrad {
1324 1.7 riastrad unsigned i;
1325 1.7 riastrad
1326 1.7 riastrad for (i = 0; i < sgt->sgt_npgs; i++) {
1327 1.7 riastrad if (bus_dmatag_bounces_paddr(dmat, sgt->sgt_pgs[i]))
1328 1.7 riastrad return false;
1329 1.7 riastrad }
1330 1.7 riastrad return true;
1331 1.7 riastrad }
1332 1.7 riastrad
1333 1.3 riastrad #else /* !__NetBSD__ */
1334 1.3 riastrad
/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 *
 * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 *
 * Returns 0 on success, or -1 if the table needs more than max_entries
 * page-sized slots.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_entries)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, index;
	dma_addr_t addr;

	index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		/*
		 * Split each (possibly multi-page) sg entry into one
		 * page-sized slot per page, advancing page pointer and
		 * bus address in lock-step.
		 */
		while (len > 0) {
			if (WARN_ON(index >= max_entries))
				return -1;
			if (pages)
				pages[index] = page;
			if (addrs)
				addrs[index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
1380 1.2 riastrad
1381 1.3 riastrad #endif /* __NetBSD__ */
1382 1.3 riastrad
1383 1.2 riastrad /**
1384 1.2 riastrad * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
1385 1.2 riastrad * @obj: GEM object which was created from a dma-buf
1386 1.2 riastrad * @sg: the sg-table which was pinned at import time
1387 1.2 riastrad *
1388 1.2 riastrad * This is the cleanup functions which GEM drivers need to call when they use
1389 1.10 riastrad * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
1390 1.2 riastrad */
1391 1.1 riastrad void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
1392 1.1 riastrad {
1393 1.1 riastrad struct dma_buf_attachment *attach;
1394 1.1 riastrad struct dma_buf *dma_buf;
1395 1.1 riastrad attach = obj->import_attach;
1396 1.1 riastrad if (sg)
1397 1.1 riastrad dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
1398 1.1 riastrad dma_buf = attach->dmabuf;
1399 1.1 riastrad dma_buf_detach(attach->dmabuf, attach);
1400 1.1 riastrad /* remove the reference */
1401 1.1 riastrad dma_buf_put(dma_buf);
1402 1.1 riastrad }
1403 1.1 riastrad EXPORT_SYMBOL(drm_prime_gem_destroy);
1404