/*	$NetBSD: drm_prime.c,v 1.7.6.1 2020/02/29 20:20:13 ad Exp $	*/

/*
 * Copyright 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.7.6.1 2020/02/29 20:20:13 ad Exp $");

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

#ifdef __NetBSD__

#include <drm/bus_dma_hacks.h>

#include <linux/nbsd-namespace.h>

/*
 * We use struct sg_table just to pass around an array of pages from
 * one device to another in drm prime. Since this is _not_ a complete
 * implementation of Linux's sg table abstraction (e.g., it does not
 * remember DMA addresses and RAM pages separately, and it doesn't
 * support the nested chained iteration of Linux scatterlists), we
 * isolate it to this file and make all callers go through a few extra
 * subroutines (drm_prime_sg_size, drm_prime_sg_free, &c.) to use it.
 * Don't use this outside drm prime!
 */

struct sg_table {
	paddr_t		*sgt_pgs;
	unsigned	sgt_npgs;
};
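
/*
 * Illustrative sketch (not part of the driver API): the intended call
 * pattern for this stand-in sg_table, using only the helpers defined
 * later in this file. The obj/dmat/map names are hypothetical driver
 * state, not anything defined here.
 *
 *	struct sg_table *sgt;
 *	int error;
 *
 *	sgt = drm_prime_pglist_to_sg(&obj->pglist, obj->npages);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	error = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
 *	...
 *	drm_prime_sg_free(sgt);
 */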

static int
sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
    unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
{
	unsigned i;

	KASSERT(offset == 0);
	KASSERT(size == npages << PAGE_SHIFT);

	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
	if (sgt->sgt_pgs == NULL)
		return -ENOMEM;
	sgt->sgt_npgs = npages;

	for (i = 0; i < npages; i++)
		sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);

	return 0;
}

static int
sg_alloc_table_from_pglist(struct sg_table *sgt, const struct pglist *pglist,
    unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
{
	struct vm_page *pg;
	unsigned i;

	KASSERT(offset == 0);
	KASSERT(size == npages << PAGE_SHIFT);

	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
	if (sgt->sgt_pgs == NULL)
		return -ENOMEM;
	sgt->sgt_npgs = npages;

	i = 0;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		KASSERT(i < npages);
		sgt->sgt_pgs[i++] = VM_PAGE_TO_PHYS(pg);
	}
	KASSERT(i == npages);

	return 0;
}

static int
sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
    const bus_dma_segment_t *segs, int nsegs, gfp_t gfp)
{
	int ret;

	KASSERT(nsegs > 0);
	sgt->sgt_pgs = kcalloc(nsegs, sizeof(sgt->sgt_pgs[0]), gfp);
	if (sgt->sgt_pgs == NULL)
		return -ENOMEM;
	sgt->sgt_npgs = nsegs;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_export_pages(dmat, segs, nsegs, sgt->sgt_pgs,
	    sgt->sgt_npgs);
	if (ret) {
		/* Don't leak the page array on failure.  */
		kfree(sgt->sgt_pgs);
		sgt->sgt_pgs = NULL;
		sgt->sgt_npgs = 0;
		return ret;
	}

	return 0;
}

static void
sg_free_table(struct sg_table *sgt)
{

	kfree(sgt->sgt_pgs);
	sgt->sgt_pgs = NULL;
	sgt->sgt_npgs = 0;
}

#endif	/* __NetBSD__ */

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to it and stores the attachment
 * in the GEM object. When the imported object is destroyed, we destroy
 * this attachment and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from the
 * dma-buf private. Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
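
/*
 * Sketch of the self-import check described above, as performed by
 * drm_gem_prime_import() later in this file: a dma-buf we exported
 * ourselves is recognized by its ops table, and if the GEM object
 * belongs to this device we just take another reference on it instead
 * of attaching to the dma-buf:
 *
 *	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		if (obj->dev == dev) {
 *			drm_gem_object_reference(obj);
 *			return obj;
 *		}
 *	}
 */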

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
    uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
    struct dma_buf *dma_buf,
    uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
    struct device *target_dev,
    struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
    struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
#ifndef __NetBSD__	/* We map/unmap elsewhere. */
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			    prime_attach->dir);
#endif
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
    struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
	if (!IS_ERR(sgt)) {
#ifdef __NetBSD__	/* We map/unmap elsewhere. */
		prime_attach->sgt = sgt;
		prime_attach->dir = dir;
#else
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
#endif
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
    struct sg_table *sgt,
    enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
    unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
    unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
    unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
    unsigned long page_num, void *addr)
{

}

#ifdef __NetBSD__
static int
drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
    struct vm_area_struct *vma)
#endif
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

#ifdef __NetBSD__
	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
	    advicep, uobjp, maxprotp);
#else
	return dev->driver->gem_prime_mmap(obj, vma);
#endif
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */

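/*
 * Sketch of how a driver wires the helpers and callbacks above into its
 * struct drm_driver. The foo_* names are hypothetical driver functions;
 * only the drm_gem_prime_* helpers come from this file.
 *
 *	.driver_features = DRIVER_GEM | DRIVER_PRIME | ...,
 *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *	.gem_prime_export = drm_gem_prime_export,
 *	.gem_prime_import = drm_gem_prime_import,
 *	.gem_prime_pin = foo_gem_prime_pin,			(optional)
 *	.gem_prime_get_sg_table = foo_gem_prime_get_sg_table,
 *	.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *	.gem_prime_vmap = foo_gem_prime_vmap,
 *	.gem_prime_vunmap = foo_gem_prime_vunmap,
 *	.gem_prime_mmap = foo_gem_prime_mmap,			(optional)
 */
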
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export function for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
    struct drm_gem_object *obj,
    int flags)
{
	struct dma_buf_export_info exp_info = {
#ifndef __NetBSD__
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
#endif
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
    struct drm_gem_object *obj,
    uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the dma-buf now holds a reference to the object */
	drm_gem_object_reference(obj);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The
 * actual exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
    struct drm_file *file_priv, uint32_t handle,
    uint32_t flags,
    int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
	    dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The
 * actual importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
    struct drm_file *file_priv, int prime_fd,
    uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
	    dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
	    dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
	    args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
	    args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
	    nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
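
/*
 * Typical use (sketch): a driver's gem_prime_get_sg_table callback can
 * simply hand its pinned page array to drm_prime_pages_to_sg. The
 * foo_obj type and its fields are hypothetical.
 *
 *	static struct sg_table *
 *	foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_obj *fobj = to_foo_obj(obj);
 *
 *		return drm_prime_pages_to_sg(fobj->pages,
 *		    fobj->size >> PAGE_SHIFT);
 *	}
 */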

#ifdef __NetBSD__

/*
 * Wrap a set of bus_dma segments in an sg_table, for export to another
 * drm driver.
 */
struct sg_table *
drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
    int nsegs)
{
	struct sg_table *sg;
	int ret;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
	    GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}

/*
 * Wrap a uvm page list in an sg_table, for export to another drm driver.
 */
struct sg_table *
drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
{
	struct sg_table *sg;
	int ret;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pglist(sg, pglist, npages, 0,
	    npages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;

out:
	kfree(sg);
	return ERR_PTR(ret);
}

/* Return the total size in bytes of the pages covered by the sg_table. */
bus_size_t
drm_prime_sg_size(struct sg_table *sg)
{

	return sg->sgt_npgs << PAGE_SHIFT;
}

/* Free an sg_table created by the drm_prime_*_to_sg routines above. */
void
drm_prime_sg_free(struct sg_table *sg)
{

	sg_free_table(sg);
	kfree(sg);
}

/* Convert an sg_table back into bus_dma segments for the importing tag. */
int
drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, const struct sg_table *sgt)
{

	/* XXX errno NetBSD->Linux */
	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs, sgt->sgt_pgs,
	    sgt->sgt_npgs);
}

/* Load an sg_table's pages into a bus_dma map created by the importer. */
int
drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct sg_table *sgt)
{
	bus_dma_segment_t *segs;
	bus_size_t size = drm_prime_sg_size(sgt);
	int nsegs = sgt->sgt_npgs;
	int ret;

	segs = kcalloc(sgt->sgt_npgs, sizeof(segs[0]), GFP_KERNEL);
	if (segs == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
	if (ret)
		goto out1;
	KASSERT(nsegs <= sgt->sgt_npgs);

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
	    BUS_DMA_NOWAIT);
	if (ret)
		goto out1;

out1:	kfree(segs);
out0:	return ret;
}

/* Return true if all pages in the sg_table are addressable by the tag. */
bool
drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
{
	unsigned i;

	for (i = 0; i < sgt->sgt_npgs; i++) {
		if (bus_dmatag_bounces_paddr(dmat, sgt->sgt_pgs[i]))
			return false;
	}
	return true;
}
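
/*
 * Import-side sketch for the helpers above: a NetBSD driver's
 * gem_prime_import_sg_table callback can check that the exporter's
 * pages are usable with its DMA tag and then load them into a map.
 * The dmat/map/ret names are hypothetical driver state.
 *
 *	if (!drm_prime_sg_importable(dmat, sgt))
 *		return ERR_PTR(-EIO);
 *	ret = -bus_dmamap_create(dmat, drm_prime_sg_size(sgt),
 *	    sgt->sgt_npgs, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &map);
 *	if (ret)			(XXX errno NetBSD->Linux)
 *		return ERR_PTR(ret);
 *	ret = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
 *	if (ret) {
 *		bus_dmamap_destroy(dmat, map);
 *		return ERR_PTR(ret);
 *	}
 */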

#else	/* !__NetBSD__ */

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
    dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

#endif	/* __NetBSD__ */

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
	mutex_destroy(&prime_fpriv->lock);
}