/* $NetBSD: drm_prime.c,v 1.1.1.4 2021/12/18 20:11:03 riastradh Exp $ */

/*
 * Copyright 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.1.1.4 2021/12/18 20:11:03 riastradh Exp $");

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
 * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interface is provided through the
 * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On export, the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called. For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * is the pair of lookup caches for import and export. These are required to
 * guarantee that any given object will always have only one unique userspace
 * handle. This is required to allow userspace to detect duplicated imports,
 * since some GEM drivers fail command submission if a given buffer object is
 * listed more than once. These import and export caches in
 * &drm_prime_file_private only retain a weak reference, which is cleaned up
 * when the corresponding object is released.
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink then
 * it will get an fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return back the underlying object from the
 * dma-buf private. For GEM based drivers this is already handled by
 * drm_gem_prime_import().
 */
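
/*
 * For illustration only: a minimal sketch of how a GEM driver typically wires
 * up the PRIME hooks described above. The "foo" driver name is hypothetical;
 * the helpers and &drm_driver fields are the ones documented in this file.
 *
 *     static struct drm_driver foo_driver = {
 *             .driver_features = DRIVER_GEM,
 *             .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *             .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *             .gem_prime_import = drm_gem_prime_import,
 *             .gem_prime_mmap = drm_gem_prime_mmap,
 *     };
 */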

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
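
/*
 * For illustration only: a sketch of a driver-specific export hook built on
 * drm_gem_dmabuf_export(), mirroring the designated-initializer style of
 * drm_gem_prime_export() below. The "foo" names are hypothetical; drivers
 * that don't need to customize anything should just use
 * drm_gem_prime_export().
 *
 *     static struct dma_buf *foo_gem_export(struct drm_gem_object *obj,
 *                                           int flags)
 *     {
 *             struct dma_buf_export_info exp_info = {
 *                     .exp_name = "foo",
 *                     .owner = obj->dev->driver->fops->owner,
 *                     .ops = &foo_dmabuf_ops,
 *                     .size = obj->size,
 *                     .flags = flags,
 *                     .priv = obj,
 *                     .resv = obj->resv,
 *             };
 *
 *             return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *     }
 */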

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put_unlocked(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: device to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else if (dev->driver->gem_prime_export)
		dmabuf = dev->driver->gem_prime_export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import
	 * list so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem
	 * close ioctl doesn't fail to remove this buffer handle from the
	 * cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to
 * pin it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage, which is ok for scanout but not the best option
 * for sharing lots of buffers for rendering.
 */
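
/*
 * For illustration only: a sketch of a driver assembling its own
 * &dma_buf_ops from the individually exported helpers below, overriding just
 * the mmap callback. The "foo" names are hypothetical; drivers that are happy
 * with all the defaults get this table for free via drm_gem_prime_export().
 *
 *     static const struct dma_buf_ops foo_dmabuf_ops = {
 *             .cache_sgt_mapping = true,
 *             .attach = drm_gem_map_attach,
 *             .detach = drm_gem_map_detach,
 *             .map_dma_buf = drm_gem_map_dma_buf,
 *             .unmap_dma_buf = drm_gem_unmap_dma_buf,
 *             .release = drm_gem_dmabuf_release,
 *             .mmap = foo_dmabuf_mmap,
 *             .vmap = drm_gem_dmabuf_vmap,
 *             .vunmap = drm_gem_dmabuf_vunmap,
 *     };
 */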

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: the sg_table containing the scatterlist to be returned, or an
 * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (obj->funcs)
		sgt = obj->funcs->get_sg_table(obj);
	else
		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(-ENOMEM);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 *
 * Returns the kernel virtual address or NULL on failure.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	void *vaddr;

	vaddr = drm_gem_vmap(obj);
	if (IS_ERR(vaddr))
		vaddr = NULL;

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			return ret;
		vma->vm_private_data = obj;
		drm_gem_object_get(obj);
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * FIXME: There's really no point to this wrapper; drivers that need anything
 * other than drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
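
/*
 * For illustration only: a sketch of a &drm_gem_object_funcs.get_sg_table
 * implementation built on drm_prime_pages_to_sg(). It assumes a hypothetical
 * driver object "struct foo_gem_object" that keeps a page array for its
 * backing storage.
 *
 *     static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
 *     {
 *             struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *             return drm_prime_pages_to_sg(foo_obj->pages,
 *                                          obj->size >> PAGE_SHIFT);
 *     }
 */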

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
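
/*
 * For illustration only: a sketch of a &drm_gem_object_funcs instance that
 * plugs drm_gem_prime_export() in as the export hook, alongside the callbacks
 * the other helpers in this file rely on. The "foo" callbacks are
 * hypothetical driver code.
 *
 *     static const struct drm_gem_object_funcs foo_gem_object_funcs = {
 *             .free = foo_gem_free_object,
 *             .export = drm_gem_prime_export,
 *             .pin = foo_gem_pin,
 *             .unpin = foo_gem_unpin,
 *             .get_sg_table = foo_gem_get_sg_table,
 *     };
 */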

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
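
/*
 * For illustration only: a sketch of a &drm_driver.gem_prime_import hook for
 * a device that does DMA through a separate device, which is the case
 * drm_gem_prime_import_dev() exists for. The "foo" device layout is
 * hypothetical.
 *
 *     static struct drm_gem_object *
 *     foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *     {
 *             struct foo_device *foo = to_foo_device(dev);
 *
 *             return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *     }
 */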

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 *
 * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_entries)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, index;
	dma_addr_t addr;

	index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(index >= max_entries))
				return -1;
			if (pages)
				pages[index] = page;
			if (addrs)
				addrs[index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
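
/*
 * For illustration only: a sketch of how a
 * &drm_driver.gem_prime_import_sg_table implementation might use
 * drm_prime_sg_to_page_addr_arrays() to recover the page and DMA address
 * arrays from the imported sg table. The "foo" object creation and free
 * helpers are hypothetical.
 *
 *     static struct drm_gem_object *
 *     foo_gem_prime_import_sg_table(struct drm_device *dev,
 *                                   struct dma_buf_attachment *attach,
 *                                   struct sg_table *sgt)
 *     {
 *             unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *             struct foo_gem_object *foo_obj;
 *             int ret;
 *
 *             foo_obj = foo_gem_object_create(dev, attach->dmabuf->size);
 *             if (IS_ERR(foo_obj))
 *                     return ERR_CAST(foo_obj);
 *
 *             ret = drm_prime_sg_to_page_addr_arrays(sgt, foo_obj->pages,
 *                                                    foo_obj->dma_addrs,
 *                                                    npages);
 *             if (ret < 0) {
 *                     foo_gem_object_free(foo_obj);
 *                     return ERR_PTR(-EINVAL);
 *             }
 *
 *             return &foo_obj->base;
 *     }
 */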

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

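/*
 * For illustration only: a sketch of a &drm_gem_object_funcs.free hook that
 * calls drm_prime_gem_destroy() for imported objects, as the import helpers
 * above require. The "foo" object layout is hypothetical.
 *
 *     static void foo_gem_free_object(struct drm_gem_object *obj)
 *     {
 *             struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *             if (obj->import_attach)
 *                     drm_prime_gem_destroy(obj, foo_obj->sgt);
 *
 *             drm_gem_object_release(obj);
 *             kfree(foo_obj);
 *     }
 */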