/* $NetBSD: drm_prime.c,v 1.1.1.3 2018/08/27 01:34:42 riastradh Exp $ */

/*
 * Copyright 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.1.1.3 2018/08/27 01:34:42 riastradh Exp $");

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export, the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On import, the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is
 * destroyed, we remove the attachment and drop the reference to
 * the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private. PRIME will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
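
/*
 * An illustrative sketch (not compiled) of the self-import detection
 * described above, for a driver rolling its own .gem_prime_import rather
 * than using the helpers; mydrv_dmabuf_ops and mydrv_prime_import are
 * hypothetical names, and the check mirrors drm_gem_prime_import() below.
 */
#if 0
static struct drm_gem_object *mydrv_prime_import(struct drm_device *dev,
						 struct dma_buf *dma_buf)
{
	if (dma_buf->ops == &mydrv_dmabuf_ops) {	/* hypothetical ops */
		struct drm_gem_object *obj = dma_buf->priv;

		/* dma-buf was exported by us: hand back the existing object */
		if (obj->dev == dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	/* otherwise fall through to a real attach/map import */
	return drm_gem_prime_import(dev, dma_buf);
}
#endif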

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
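
/*
 * A sketch (not compiled) of how a driver with its own dma_buf_ops hooks
 * this helper as the release callback so the exported GEM reference is
 * dropped correctly; the mydrv_* callbacks are hypothetical.
 */
#if 0
static const struct dma_buf_ops mydrv_dmabuf_ops = {
	.map_dma_buf = mydrv_map_dma_buf,	/* hypothetical */
	.unmap_dma_buf = mydrv_unmap_dma_buf,	/* hypothetical */
	.release = drm_gem_dmabuf_release,	/* the helper above */
	.mmap = mydrv_mmap,			/* hypothetical */
	/* remaining callbacks as in drm_gem_prime_dmabuf_ops below */
};
#endif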

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 * - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 * - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 * - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * - @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 * - @gem_prime_import_sg_table (import): produce a GEM object from another
 *   driver's scatter/gather table
 */
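
/*
 * A sketch (not compiled) of how a driver typically wires the helpers and
 * the callbacks listed above into its struct drm_driver; the mydrv_*
 * functions are hypothetical stand-ins for driver implementations.
 */
#if 0
static struct drm_driver mydrv_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,	/* among others */

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,

	/* export callbacks consumed by the helpers */
	.gem_prime_pin = mydrv_gem_prime_pin,			/* optional */
	.gem_prime_unpin = mydrv_gem_prime_unpin,		/* optional */
	.gem_prime_get_sg_table = mydrv_gem_prime_get_sg_table,
	.gem_prime_vmap = mydrv_gem_prime_vmap,
	.gem_prime_vunmap = mydrv_gem_prime_vunmap,
	.gem_prime_mmap = mydrv_gem_prime_mmap,			/* optional */

	/* import callback consumed by drm_gem_prime_import */
	.gem_prime_import_sg_table = mydrv_gem_prime_import_sg_table,
};
#endif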

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export function for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
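
/*
 * Drivers needing driver-specific checks around export can wrap the helper
 * instead of reimplementing it. A sketch (not compiled);
 * mydrv_object_is_busy is a hypothetical check.
 */
#if 0
static struct dma_buf *mydrv_gem_prime_export(struct drm_device *dev,
					      struct drm_gem_object *obj,
					      int flags)
{
	/* e.g. refuse to export buffers the hardware still owns */
	if (mydrv_object_is_busy(obj))	/* hypothetical */
		return ERR_PTR(-EBUSY);

	return drm_gem_prime_export(dev, obj, flags);
}
#endif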

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the dma-buf now holds a reference to the object */
	drm_gem_object_reference(obj);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
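
/*
 * The gem_prime_import_sg_table callback that drm_gem_prime_import() relies
 * on turns the mapped sg table into a driver GEM object. A sketch (not
 * compiled); struct mydrv_bo and mydrv_bo_create are hypothetical.
 */
#if 0
struct mydrv_bo {			/* hypothetical driver object */
	struct drm_gem_object base;
	struct sg_table *sgt;
	struct page **pages;
	dma_addr_t *dma_addrs;
};

static struct drm_gem_object *
mydrv_gem_prime_import_sg_table(struct drm_device *dev,
				struct dma_buf_attachment *attach,
				struct sg_table *sgt)
{
	struct mydrv_bo *bo;

	bo = mydrv_bo_create(dev, attach->dmabuf->size);	/* hypothetical */
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	bo->sgt = sgt;		/* keep the table for drm_prime_gem_destroy */
	return &bo->base;	/* embedded struct drm_gem_object */
}
#endif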

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
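
/*
 * A typical consumer is the driver's gem_prime_get_sg_table callback, which
 * simply wraps its backing page array. A sketch (not compiled), reusing the
 * hypothetical mydrv_bo above; to_mydrv_bo would be a container_of helper.
 */
#if 0
static struct sg_table *mydrv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mydrv_bo *bo = to_mydrv_bo(obj);	/* hypothetical */
	unsigned int npages = obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(bo->pages, npages);
}
#endif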

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
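
/*
 * A sketch (not compiled) of the TTM-style use mentioned above: at import
 * time the driver flattens the sg table into per-page arrays it can fault
 * from later, again using the hypothetical mydrv_bo.
 */
#if 0
static int mydrv_fill_page_arrays(struct mydrv_bo *bo, struct sg_table *sgt)
{
	int npages = bo->base.size >> PAGE_SHIFT;

	return drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
						bo->dma_addrs, npages);
}
#endif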

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
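
/*
 * A sketch (not compiled) of the free-object path calling this cleanup for
 * imported objects only, again using the hypothetical mydrv_bo; natively
 * allocated objects release their own backing storage instead.
 */
#if 0
static void mydrv_gem_free_object(struct drm_gem_object *obj)
{
	struct mydrv_bo *bo = to_mydrv_bo(obj);	/* hypothetical */

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, bo->sgt);
	else
		mydrv_bo_release_pages(bo);	/* hypothetical */

	drm_gem_object_release(obj);
	kfree(bo);
}
#endif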

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}