/*	$NetBSD: drm_prime.c,v 1.1.1.2.30.1 2019/06/10 22:07:57 christos Exp $	*/
2
3 /*
4 * Copyright 2012 Red Hat
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 * Authors:
26 * Dave Airlie <airlied (at) redhat.com>
27 * Rob Clark <rob.clark (at) linaro.org>
28 *
29 */
30
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.1.1.2.30.1 2019/06/10 22:07:57 christos Exp $");
33
34 #include <linux/export.h>
35 #include <linux/dma-buf.h>
36 #include <drm/drmP.h>
37 #include <drm/drm_gem.h>
38
39 #include "drm_internal.h"
40
41 #ifdef __NetBSD__
42
43 #include <drm/bus_dma_hacks.h>
44
45 /*
46 * We use struct sg_table just to pass around an array of pages from
47 * one device to another in drm prime. Since this is _not_ a complete
48 * implementation of Linux's sg table abstraction (e.g., it does not
49 * remember DMA addresses and RAM pages separately, and it doesn't
50 * support the nested chained iteration of Linux scatterlists), we
51 * isolate it to this file and make all callers go through a few extra
52 * subroutines (drm_prime_sg_size, drm_prime_sg_free, &c.) to use it.
53 * Don't use this outside drm prime!
54 */
55
/*
 * Minimal stand-in for Linux's struct sg_table: just a flat array of
 * physical page addresses.  See the block comment above — this does not
 * track DMA addresses separately and must not escape drm prime.
 */
struct sg_table {
	paddr_t		*sgt_pgs;	/* array of page physical addresses */
	unsigned	sgt_npgs;	/* number of entries in sgt_pgs */
};
60
61 static int
62 sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
63 unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
64 {
65 unsigned i;
66
67 KASSERT(offset == 0);
68 KASSERT(size == npages << PAGE_SHIFT);
69
70 sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
71 if (sgt->sgt_pgs == NULL)
72 return -ENOMEM;
73 sgt->sgt_npgs = npages;
74
75 for (i = 0; i < npages; i++)
76 sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);
77
78 return 0;
79 }
80
81 static int
82 sg_alloc_table_from_pglist(struct sg_table *sgt, const struct pglist *pglist,
83 unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
84 {
85 struct vm_page *pg;
86 unsigned i;
87
88 KASSERT(offset == 0);
89 KASSERT(size == npages << PAGE_SHIFT);
90
91 sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
92 if (sgt->sgt_pgs == NULL)
93 return -ENOMEM;
94 sgt->sgt_npgs = npages;
95
96 i = 0;
97 TAILQ_FOREACH(pg, pglist, pageq.queue) {
98 KASSERT(i < npages);
99 sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(pg);
100 }
101 KASSERT(i == npages);
102
103 return 0;
104 }
105
106 static int
107 sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
108 const bus_dma_segment_t *segs, int nsegs, gfp_t gfp)
109 {
110 int ret;
111
112 KASSERT(nsegs > 0);
113 sgt->sgt_pgs = kcalloc(nsegs, sizeof(sgt->sgt_pgs[0]), gfp);
114 if (sgt->sgt_pgs == NULL)
115 return -ENOMEM;
116 sgt->sgt_npgs = nsegs;
117
118 /* XXX errno NetBSD->Linux */
119 ret = -bus_dmamem_export_pages(dmat, segs, nsegs, sgt->sgt_pgs,
120 sgt->sgt_npgs);
121 if (ret)
122 return ret;
123
124 return 0;
125 }
126
/*
 * Release the page array held by sgt and reset it to empty.  Does not
 * free sgt itself; pair with kfree(sgt) (see drm_prime_sg_free).
 */
static void
sg_free_table(struct sg_table *sgt)
{

	kfree(sgt->sgt_pgs);
	sgt->sgt_pgs = NULL;
	sgt->sgt_npgs = 0;
}
135
136 #endif /* __NetBSD__ */
137
138 /*
139 * DMA-BUF/GEM Object references and lifetime overview:
140 *
141 * On the export the dma_buf holds a reference to the exporting GEM
142 * object. It takes this reference in handle_to_fd_ioctl, when it
143 * first calls .prime_export and stores the exporting GEM object in
144 * the dma_buf priv. This reference is released when the dma_buf
145 * object goes away in the driver .release function.
146 *
147 * On the import the importing GEM object holds a reference to the
148 * dma_buf (which in turn holds a ref to the exporting GEM object).
149 * It takes that reference in the fd_to_handle ioctl.
150 * It calls dma_buf_get, creates an attachment to it and stores the
151 * attachment in the GEM object. When this attachment is destroyed
152 * when the imported object is destroyed, we remove the attachment
153 * and drop the reference to the dma_buf.
154 *
155 * Thus the chain of references always flows in one direction
156 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
157 *
158 * Self-importing: if userspace is using PRIME as a replacement for flink
159 * then it will get a fd->handle request for a GEM object that it created.
160 * Drivers should detect this situation and return back the gem object
161 * from the dma-buf private. Prime will do this automatically for drivers that
162 * use the drm_gem_prime_{import,export} helpers.
163 */
164
/* Per-file cache entry mapping a dma_buf to its GEM handle. */
struct drm_prime_member {
	struct list_head entry;		/* link on prime_fpriv->head */
	struct dma_buf *dma_buf;	/* holds a reference (get_dma_buf) */
	uint32_t handle;		/* GEM handle in the owning file */
};

/* Per-attachment state: cached sg table and the direction it was mapped. */
struct drm_prime_attachment {
	struct sg_table *sgt;		/* cached mapping, NULL until first map */
	enum dma_data_direction dir;	/* DMA_NONE until first map */
};
175
176 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
177 struct dma_buf *dma_buf, uint32_t handle)
178 {
179 struct drm_prime_member *member;
180
181 member = kmalloc(sizeof(*member), GFP_KERNEL);
182 if (!member)
183 return -ENOMEM;
184
185 get_dma_buf(dma_buf);
186 member->dma_buf = dma_buf;
187 member->handle = handle;
188 list_add(&member->entry, &prime_fpriv->head);
189 return 0;
190 }
191
192 static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
193 uint32_t handle)
194 {
195 struct drm_prime_member *member;
196
197 list_for_each_entry(member, &prime_fpriv->head, entry) {
198 if (member->handle == handle)
199 return member->dma_buf;
200 }
201
202 return NULL;
203 }
204
205 static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
206 struct dma_buf *dma_buf,
207 uint32_t *handle)
208 {
209 struct drm_prime_member *member;
210
211 list_for_each_entry(member, &prime_fpriv->head, entry) {
212 if (member->dma_buf == dma_buf) {
213 *handle = member->handle;
214 return 0;
215 }
216 }
217 return -ENOENT;
218 }
219
/*
 * dma_buf attach callback for GEM-exported buffers: allocate the
 * per-attachment cache (direction DMA_NONE = not yet mapped) and pin
 * the object via the driver's optional gem_prime_pin hook.
 */
static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	/* Pinning is optional; drivers without the hook just attach. */
	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}
240
/*
 * dma_buf detach callback: undo drm_gem_map_attach.  Unpins the object,
 * then tears down any cached sg mapping.  On Linux the sg list is also
 * DMA-unmapped here; on NetBSD mapping is handled by the importer, so
 * only the table itself is freed.
 */
static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	/* Attach may have failed before priv was set. */
	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
#ifndef __NetBSD__ /* We map/unmap elsewhere. */
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
#endif
		sg_free_table(sgt);
	}

	/* kfree(NULL) is a no-op, so this is safe when sgt was never set. */
	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}
269
270 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
271 struct dma_buf *dma_buf)
272 {
273 struct drm_prime_member *member, *safe;
274
275 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
276 if (member->dma_buf == dma_buf) {
277 dma_buf_put(dma_buf);
278 list_del(&member->entry);
279 kfree(member);
280 }
281 }
282 }
283
/*
 * dma_buf map callback: return the exporter's sg table for this
 * attachment.  The first successful map is cached in attach->priv along
 * with its direction; later maps in the same direction return the cache,
 * and maps in a different direction are rejected.  On Linux the table is
 * also DMA-mapped here; on NetBSD the importer maps it itself.
 */
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
	if (!IS_ERR(sgt)) {
#ifdef __NetBSD__ /* We map/unmap elsewhere. */
		prime_attach->sgt = sgt;
		prime_attach->dir = dir;
#else
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
#endif
	}

	return sgt;
}
324
/*
 * dma_buf unmap callback: intentionally empty.  The cached mapping is
 * kept for the attachment's lifetime and torn down in
 * drm_gem_map_detach.
 */
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}
331
/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
347
348 static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
349 {
350 struct drm_gem_object *obj = dma_buf->priv;
351 struct drm_device *dev = obj->dev;
352
353 return dev->driver->gem_prime_vmap(obj);
354 }
355
356 static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
357 {
358 struct drm_gem_object *obj = dma_buf->priv;
359 struct drm_device *dev = obj->dev;
360
361 dev->driver->gem_prime_vunmap(obj, vaddr);
362 }
363
/*
 * Per-page kmap callbacks are not supported for GEM PRIME buffers:
 * the kmap hooks return NULL and the kunmap hooks do nothing.
 */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}
386
/*
 * dma_buf mmap callback: forward to the driver's optional
 * gem_prime_mmap hook.  The signature differs per platform: NetBSD's
 * UVM interface hands back a uvm_object plus mapping parameters, Linux
 * fills in a vm_area_struct.
 */
#ifdef __NetBSD__
static int
drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
#endif
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* mmap of PRIME buffers is optional per driver. */
	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

#ifdef __NetBSD__
	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
	    advicep, uobjp, maxprotp);
#else
	return dev->driver->gem_prime_mmap(obj, vma);
#endif
}
410
/* dma_buf operations installed on every GEM-exported PRIME buffer. */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
425
426 /**
427 * DOC: PRIME Helpers
428 *
429 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
430 * simpler APIs by using the helper functions @drm_gem_prime_export and
431 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
432 * six lower-level driver callbacks:
433 *
434 * Export callbacks:
435 *
436 * - @gem_prime_pin (optional): prepare a GEM object for exporting
437 *
438 * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
439 *
440 * - @gem_prime_vmap: vmap a buffer exported by your driver
441 *
442 * - @gem_prime_vunmap: vunmap a buffer exported by your driver
443 *
444 * - @gem_prime_mmap (optional): mmap a buffer exported by your driver
445 *
446 * Import callback:
447 *
448 * - @gem_prime_import_sg_table (import): produce a GEM object from another
449 * driver's scatter/gather table
450 */
451
452 /**
453 * drm_gem_prime_export - helper library implementation of the export callback
454 * @dev: drm_device to export from
455 * @obj: GEM object to export
456 * @flags: flags like DRM_CLOEXEC
457 *
458 * This is the implementation of the gem_prime_export functions for GEM drivers
459 * using the PRIME helpers.
460 */
/*
 * See the kernel-doc comment above: helper implementation of the
 * gem_prime_export callback.  Builds a dma_buf_export_info for obj
 * (using the generic PRIME dma_buf ops) and exports it, sharing the
 * driver's reservation object when gem_prime_res_obj is provided.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
#ifndef __NetBSD__
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
#endif
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
482
/*
 * Export obj through the driver's gem_prime_export hook and cache the
 * resulting dma_buf in obj->dma_buf.  Caller holds dev->object_name_lock
 * (which is what makes the handle_count check race-free against
 * gem_close).  Returns the dma_buf or an ERR_PTR.
 */
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the callers is now used by the dma-buf */
	drm_gem_object_reference(obj);

	return dmabuf;
}
515
516 /**
517 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
518 * @dev: dev to export the buffer from
519 * @file_priv: drm file-private structure
520 * @handle: buffer handle to export
521 * @flags: flags like DRM_CLOEXEC
522 * @prime_fd: pointer to storage for the fd id of the create dma-buf
523 *
524 * This is the PRIME export function which must be used mandatorily by GEM
525 * drivers to ensure correct lifetime management of the underlying GEM object.
526 * The actual exporting from GEM object to a dma-buf is done through the
527 * gem_prime_export driver callback.
528 */
/*
 * See the kernel-doc comment above.  Flow: look up the GEM object,
 * reuse a cached dma_buf for this (file, handle) if one exists,
 * otherwise reuse the import attachment or the object's existing
 * dma_buf, and only export a fresh dma_buf as a last resort.  Every
 * path that produced a dma_buf records it in the per-file cache before
 * installing an fd.  Holds file_priv->prime.lock throughout and
 * dev->object_name_lock across export + cache insertion.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* Already exported through this file?  Reuse the cached dma_buf. */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	/* Exported before (possibly via another file)?  Share it. */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
615
616 /**
617 * drm_gem_prime_import - helper library implementation of the import callback
618 * @dev: drm_device to import into
619 * @dma_buf: dma-buf object to import
620 *
621 * This is the implementation of the gem_prime_import functions for GEM drivers
622 * using the PRIME helpers.
623 */
/*
 * See the kernel-doc comment above.  Self-import short-circuit: a
 * dma_buf we exported ourselves just gets its underlying GEM object
 * re-referenced.  Otherwise attach to the dma_buf, map it, and let the
 * driver build a GEM object from the resulting sg table; the new
 * object keeps the attachment (and thus a dma_buf reference) in
 * obj->import_attach.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from out own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* The attachment (stored in obj below) keeps the dma_buf alive. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
678
679 /**
680 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
681 * @dev: dev to export the buffer from
682 * @file_priv: drm file-private structure
683 * @prime_fd: fd id of the dma-buf which should be imported
684 * @handle: pointer to storage for the handle of the imported buffer object
685 *
686 * This is the PRIME import function which must be used mandatorily by GEM
687 * drivers to ensure correct lifetime management of the underlying GEM object.
688 * The actual importing of GEM object from the dma-buf is done through the
689 * gem_import_export driver callback.
690 */
/*
 * See the kernel-doc comment above.  Flow: resolve the fd to a dma_buf,
 * return the cached handle if this file already imported it, otherwise
 * import via the driver hook, create a fresh handle, and cache the
 * (dma_buf, handle) pair.  Holds file_priv->prime.lock throughout;
 * dev->object_name_lock is taken for the import and released by
 * drm_gem_handle_create_tail on the success path.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	/* Drop the reference dma_buf_get took; the caches hold their own. */
	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
755
756 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
757 struct drm_file *file_priv)
758 {
759 struct drm_prime_handle *args = data;
760 uint32_t flags;
761
762 if (!drm_core_check_feature(dev, DRIVER_PRIME))
763 return -EINVAL;
764
765 if (!dev->driver->prime_handle_to_fd)
766 return -ENOSYS;
767
768 /* check flags are valid */
769 if (args->flags & ~DRM_CLOEXEC)
770 return -EINVAL;
771
772 /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
773 flags = args->flags & DRM_CLOEXEC;
774
775 return dev->driver->prime_handle_to_fd(dev, file_priv,
776 args->handle, flags, &args->fd);
777 }
778
779 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
780 struct drm_file *file_priv)
781 {
782 struct drm_prime_handle *args = data;
783
784 if (!drm_core_check_feature(dev, DRIVER_PRIME))
785 return -EINVAL;
786
787 if (!dev->driver->prime_fd_to_handle)
788 return -ENOSYS;
789
790 return dev->driver->prime_fd_to_handle(dev, file_priv,
791 args->fd, &args->handle);
792 }
793
794 /**
795 * drm_prime_pages_to_sg - converts a page array into an sg list
796 * @pages: pointer to the array of page pointers to convert
797 * @nr_pages: length of the page vector
798 *
799 * This helper creates an sg table object from a set of pages
800 * the driver is responsible for mapping the pages into the
801 * importers address space for use with dma_buf itself.
802 */
803 struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
804 {
805 struct sg_table *sg = NULL;
806 int ret;
807
808 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
809 if (!sg) {
810 ret = -ENOMEM;
811 goto out;
812 }
813
814 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
815 nr_pages << PAGE_SHIFT, GFP_KERNEL);
816 if (ret)
817 goto out;
818
819 return sg;
820 out:
821 kfree(sg);
822 return ERR_PTR(ret);
823 }
824 EXPORT_SYMBOL(drm_prime_pages_to_sg);
825
826 #ifdef __NetBSD__
827
828 struct sg_table *
829 drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
830 int nsegs)
831 {
832 struct sg_table *sg;
833 int ret;
834
835 sg = kmalloc(sizeof(*sg), GFP_KERNEL);
836 if (sg == NULL) {
837 ret = -ENOMEM;
838 goto out;
839 }
840
841 ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
842 GFP_KERNEL);
843 if (ret)
844 goto out;
845
846 return sg;
847 out:
848 kfree(sg);
849 return ERR_PTR(ret);
850 }
851
852 struct sg_table *
853 drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
854 {
855 struct sg_table *sg;
856 int ret;
857
858 sg = kmalloc(sizeof(*sg), GFP_KERNEL);
859 if (sg == NULL) {
860 ret = -ENOMEM;
861 goto out;
862 }
863
864 ret = sg_alloc_table_from_pglist(sg, pglist, 0, npages << PAGE_SHIFT,
865 npages, GFP_KERNEL);
866 if (ret)
867 goto out;
868
869 return sg;
870
871 out:
872 kfree(sg);
873 return ERR_PTR(ret);
874 }
875
/* Total size in bytes covered by sg (whole pages only). */
bus_size_t
drm_prime_sg_size(struct sg_table *sg)
{

	return sg->sgt_npgs << PAGE_SHIFT;
}
882
/* Free an sg table created by the drm_prime_*_to_sg helpers. */
void
drm_prime_sg_free(struct sg_table *sg)
{

	sg_free_table(sg);
	kfree(sg);
}
890
/*
 * Convert sgt's page array back into bus_dma segments for dmat.
 * Fills at most nsegs entries of segs, stores the count used in
 * *rsegs, and returns 0 or a negative errno.
 */
int
drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, const struct sg_table *sgt)
{

	/* XXX errno NetBSD->Linux */
	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs, sgt->sgt_pgs,
	    sgt->sgt_npgs);
}
900
/*
 * Load a DMA map from an sg table: convert the pages to bus_dma
 * segments, then bus_dmamap_load_raw them.  The temporary segment
 * array is freed on all paths.  Returns 0 or a negative errno.
 */
int
drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct sg_table *sgt)
{
	bus_dma_segment_t *segs;
	bus_size_t size = drm_prime_sg_size(sgt);
	int nsegs = sgt->sgt_npgs;
	int ret;

	segs = kcalloc(sgt->sgt_npgs, sizeof(segs[0]), GFP_KERNEL);
	if (segs == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Import may coalesce pages, so nsegs can shrink here. */
	ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
	if (ret)
		goto out1;
	KASSERT(nsegs <= sgt->sgt_npgs);

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
	    BUS_DMA_NOWAIT);
	if (ret)
		goto out1;

out1:	kfree(segs);
out0:	return ret;
}
930
931 bool
932 drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
933 {
934 unsigned i;
935
936 for (i = 0; i < sgt->sgt_npgs; i++) {
937 if (bus_dmatag_bounces_paddr(dmat, sgt->sgt_pgs[i]))
938 return false;
939 }
940 return true;
941 }
942
943 #else /* !__NetBSD__ */
944
945 /**
946 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
947 * @sgt: scatter-gather table to convert
948 * @pages: array of page pointers to store the page array in
949 * @addrs: optional array to store the dma bus address of each page
950 * @max_pages: size of both the passed-in arrays
951 *
952 * Exports an sg table into an array of pages and addresses. This is currently
953 * required by the TTM driver in order to do correct fault handling.
954 */
/*
 * See the kernel-doc comment above (Linux-only path).  Walks each sg
 * entry and expands it page by page into the output arrays.  Returns 0,
 * or -1 if the arrays would overflow max_pages.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		/* Expand this (possibly multi-page) entry one page at a time. */
		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
987
988 #endif /* __NetBSD__ */
989
990 /**
991 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
992 * @obj: GEM object which was created from a dma-buf
993 * @sg: the sg-table which was pinned at import time
994 *
995 * This is the cleanup functions which GEM drivers need to call when they use
996 * @drm_gem_prime_import to import dma-bufs.
997 */
/*
 * See the kernel-doc comment above: unmap (if mapped), detach, and drop
 * the dma_buf reference taken at import time.  obj->import_attach must
 * be set (i.e. obj came from drm_gem_prime_import).
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	/* Save the dma_buf pointer: detach frees the attachment. */
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
1011
/*
 * Initialize the per-file PRIME state: empty (dma_buf, handle) cache
 * and its lock.  NetBSD uses the Linux-emulation mutex wrappers.
 */
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
#ifdef __NetBSD__
	linux_mutex_init(&prime_fpriv->lock);
#else
	mutex_init(&prime_fpriv->lock);
#endif
}
1021
/*
 * Tear down the per-file PRIME state.  The handle cache must already
 * be empty (drm_gem_release drains it before this runs).
 */
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
#ifdef __NetBSD__
	linux_mutex_destroy(&prime_fpriv->lock);
#else
	mutex_destroy(&prime_fpriv->lock);
#endif
}
1032