/*	$NetBSD: drm_prime.c,v 1.12 2021/12/19 01:53:39 riastradh Exp $	*/

/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.12 2021/12/19 01:53:39 riastradh Exp $");

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

#ifdef __NetBSD__

#include <drm/bus_dma_hacks.h>

#include <linux/nbsd-namespace.h>

/*
 * We use struct sg_table just to pass around an array of pages from
 * one device to another in drm prime.  Since this is _not_ a complete
 * implementation of Linux's sg table abstraction (e.g., it does not
 * remember DMA addresses and RAM pages separately, and it doesn't
 * support the nested chained iteration of Linux scatterlists), we
 * isolate it to this file and make all callers go through a few extra
 * subroutines (drm_prime_sg_size, drm_prime_sg_free, &c.) to use it.
 * Don't use this outside drm prime!
 */
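
/*
 * Illustrative use of those subroutines -- a sketch only, with a
 * hypothetical caller that has a pglist of npages pages:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_prime_pglist_to_sg(&pglist, npages);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	...
 *	drm_prime_sg_free(sgt);
 */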

struct sg_table {
	paddr_t		*sgt_pgs;
	unsigned	sgt_npgs;
};

static int
sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
    unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
{
	unsigned i;

	KASSERT(offset == 0);
	KASSERT(size == npages << PAGE_SHIFT);

	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
	if (sgt->sgt_pgs == NULL)
		return -ENOMEM;
	sgt->sgt_npgs = npages;

	for (i = 0; i < npages; i++)
		sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);

	return 0;
}

static int
sg_alloc_table_from_pglist(struct sg_table *sgt, const struct pglist *pglist,
    unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
{
	struct vm_page *pg;
	unsigned i;

	KASSERT(offset == 0);
	KASSERT(size == npages << PAGE_SHIFT);

	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
	if (sgt->sgt_pgs == NULL)
		return -ENOMEM;
	sgt->sgt_npgs = npages;

	i = 0;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		KASSERT(i < npages);
		sgt->sgt_pgs[i++] = VM_PAGE_TO_PHYS(pg);
	}
	KASSERT(i == npages);

	return 0;
}

static int
sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
    const bus_dma_segment_t *segs, int nsegs, gfp_t gfp)
{
	int ret;

	KASSERT(nsegs > 0);
	sgt->sgt_pgs = kcalloc(nsegs, sizeof(sgt->sgt_pgs[0]), gfp);
	if (sgt->sgt_pgs == NULL)
		return -ENOMEM;
	sgt->sgt_npgs = nsegs;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_export_pages(dmat, segs, nsegs, sgt->sgt_pgs,
	    sgt->sgt_npgs);
	if (ret) {
		kfree(sgt->sgt_pgs);
		sgt->sgt_pgs = NULL;
		sgt->sgt_npgs = 0;
		return ret;
	}

	return 0;
}

static void
sg_free_table(struct sg_table *sgt)
{

	kfree(sgt->sgt_pgs);
	sgt->sgt_pgs = NULL;
	sgt->sgt_npgs = 0;
}

#endif	/* __NetBSD__ */

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
 * GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interfaces are provided through the
 * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called.  For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * is the pair of lookup caches for import and export. These are required to
 * guarantee that any given object will always have only one unique userspace
 * handle. This is required to allow userspace to detect duplicated imports,
 * since some GEM drivers do fail command submissions if a given buffer object
 * is listed more than once. These import and export caches in
 * &drm_prime_file_private only retain a weak reference, which is cleaned up
 * when the corresponding object is released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created.  Drivers
 * should detect this situation and return back the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */
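
/*
 * Example: minimal PRIME wiring for a GEM-based driver.  Illustrative
 * sketch only -- "example_driver" is hypothetical, but the hooks are
 * the ones documented above:
 *
 *	static struct drm_driver example_driver = {
 *		...
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_import = drm_gem_prime_import,
 *		...
 *	};
 */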

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

#ifdef __NetBSD__
static int
compare_dmabufs(void *cookie, const void *va, const void *vb)
{
	const struct drm_prime_member *ma = va;
	const struct drm_prime_member *mb = vb;

	if (ma->dma_buf < mb->dma_buf)
		return -1;
	if (ma->dma_buf > mb->dma_buf)
		return +1;
	return 0;
}

static int
compare_dmabuf_key(void *cookie, const void *vm, const void *vk)
{
	const struct drm_prime_member *m = vm;
	const struct dma_buf *const *kp = vk;

	if (m->dma_buf < *kp)
		return -1;
	if (m->dma_buf > *kp)
		return +1;
	return 0;
}

static int
compare_handles(void *cookie, const void *va, const void *vb)
{
	const struct drm_prime_member *ma = va;
	const struct drm_prime_member *mb = vb;

	if (ma->handle < mb->handle)
		return -1;
	if (ma->handle > mb->handle)
		return +1;
	return 0;
}

static int
compare_handle_key(void *cookie, const void *vm, const void *vk)
{
	const struct drm_prime_member *m = vm;
	const uint32_t *kp = vk;

	if (m->handle < *kp)
		return -1;
	if (m->handle > *kp)
		return +1;
	return 0;
}

static const rb_tree_ops_t dmabuf_ops = {
	.rbto_compare_nodes = compare_dmabufs,
	.rbto_compare_key = compare_dmabuf_key,
	.rbto_node_offset = offsetof(struct drm_prime_member, dmabuf_rb),
};

static const rb_tree_ops_t handle_ops = {
	.rbto_compare_nodes = compare_handles,
	.rbto_compare_key = compare_handle_key,
	.rbto_node_offset = offsetof(struct drm_prime_member, handle_rb),
};
#endif

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
#ifdef __NetBSD__
	struct drm_prime_member *collision __diagused;
#else
	struct rb_node **p, *rb;
#endif

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

#ifdef __NetBSD__
	collision = rb_tree_insert_node(&prime_fpriv->dmabufs.rbr_tree,
	    member);
	KASSERT(collision == NULL);
#else
	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
#endif

#ifdef __NetBSD__
	collision = rb_tree_insert_node(&prime_fpriv->handles.rbr_tree,
	    member);
	KASSERT(collision == NULL);
#else
	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
#endif

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
#ifdef __NetBSD__
	struct drm_prime_member *member;

	member = rb_tree_find_node(&prime_fpriv->handles.rbr_tree, &handle);
	return member ? member->dma_buf : NULL;
#else
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
#endif
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
#ifdef __NetBSD__
	struct drm_prime_member *member;

	member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
	if (member == NULL)
		return -ENOENT;
	*handle = member->handle;
	return 0;
#else
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
#endif
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
#ifdef __NetBSD__
	struct drm_prime_member *member;

	member = rb_tree_find_node(&prime_fpriv->dmabufs.rbr_tree, &dma_buf);
	if (member != NULL) {
		rb_tree_remove_node(&prime_fpriv->handles.rbr_tree, member);
		rb_tree_remove_node(&prime_fpriv->dmabufs.rbr_tree, member);

		dma_buf_put(dma_buf);
		kfree(member);
	}
#else
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
#endif
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
#ifdef __NetBSD__
	linux_mutex_init(&prime_fpriv->lock);
#else
	mutex_init(&prime_fpriv->lock);
#endif
#ifdef __NetBSD__
	rb_tree_init(&prime_fpriv->dmabufs.rbr_tree, &dmabuf_ops);
	rb_tree_init(&prime_fpriv->handles.rbr_tree, &handle_ops);
#else
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
#endif
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
#ifdef __NetBSD__ /* XXX post-merge linux doesn't destroy its lock now? */
	linux_mutex_destroy(&prime_fpriv->lock);
#endif
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_put_unlocked(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object.
 * The actual importing of GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else if (dev->driver->gem_prime_export)
		dmabuf = dev->driver->gem_prime_export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}
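
/*
 * Userspace reaches the two ioctls above through
 * DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_IOCTL_PRIME_FD_TO_HANDLE.
 * Illustrative sketch of the export direction:
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	if (ioctl(drmfd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == -1)
 *		err(1, "PRIME export");
 *	(args.fd now holds the dma-buf file descriptor)
 */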

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to pin
 * it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage. That is ok for scanout, but is not the best
 * option for sharing lots of buffers for rendering.
 */
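
/*
 * Example: a driver-specific &dma_buf_ops table built from these
 * individually exported helpers.  Hypothetical sketch; only map/unmap
 * are overridden, the rest reuse the defaults:
 *
 *	static const struct dma_buf_ops example_dmabuf_ops = {
 *		.attach = drm_gem_map_attach,
 *		.detach = drm_gem_map_detach,
 *		.map_dma_buf = example_map_dma_buf,
 *		.unmap_dma_buf = example_unmap_dma_buf,
 *		.release = drm_gem_dmabuf_release,
 *		.vmap = drm_gem_dmabuf_vmap,
 *		.vunmap = drm_gem_dmabuf_vunmap,
 *	};
 */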

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (obj->funcs)
		sgt = obj->funcs->get_sg_table(obj);
	else
		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(-ENOMEM);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 *
 * Returns the kernel virtual address or NULL on failure.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	void *vaddr;

	vaddr = drm_gem_vmap(obj);
	if (IS_ERR(vaddr))
		vaddr = NULL;

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			return ret;
		vma->vm_private_data = obj;
		drm_gem_object_get(obj);
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);
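
/*
 * On Linux, a driver opts into this path simply by pointing its
 * &drm_driver.gem_prime_mmap hook at the helper -- a sketch:
 *
 *	.gem_prime_mmap = drm_gem_prime_mmap,
 *
 * (On NetBSD the hook instead has the uvm-style signature used by
 * drm_gem_dmabuf_mmap() below.)
 */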

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * FIXME: There's really no point to this wrapper, drivers which need anything
 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
#ifdef __NetBSD__
static int
drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    int *maxprotp)
#else
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
#endif
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

#ifdef __NetBSD__
	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
	    advicep, uobjp, maxprotp);
#else
	return dev->driver->gem_prime_mmap(obj, vma);
#endif
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
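
/*
 * Example &drm_gem_object_funcs.get_sg_table built on the helper
 * above.  Hypothetical sketch; assumes the driver keeps its backing
 * pages in a "pages" array:
 *
 *	static struct sg_table *
 *	example_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct example_gem *bo = to_example_gem(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages,
 *		    obj->size >> PAGE_SHIFT);
 *	}
 */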

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function
 * for GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
#ifndef __NetBSD__
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
#endif
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev for
 * attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
					    struct dma_buf *dma_buf,
					    struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);
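
/*
 * Per the comments above, an importing driver must call
 * drm_prime_gem_destroy() when it frees the object.  Hypothetical
 * sketch of such a &drm_gem_object_funcs.free hook:
 *
 *	static void
 *	example_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct example_gem *bo = to_example_gem(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		...
 *	}
 */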

#ifdef __NetBSD__

/**
 * drm_prime_bus_dmamem_to_sg - wrap bus_dma segments in an sg_table
 * @dmat: bus DMA tag
 * @segs: array of bus DMA segments
 * @nsegs: number of segments
 *
 * Returns the new sg_table on success, or an ERR_PTR on failure.
 */
struct sg_table *
drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
    int nsegs)
{
	struct sg_table *sg;
	int ret;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
	    GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}

struct sg_table *
drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
{
	struct sg_table *sg;
	int ret;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pglist(sg, pglist, npages, 0,
	    (bus_size_t)npages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;

out:
	kfree(sg);
	return ERR_PTR(ret);
}

bus_size_t
drm_prime_sg_size(struct sg_table *sg)
{

	return sg->sgt_npgs << PAGE_SHIFT;
}

void
drm_prime_sg_free(struct sg_table *sg)
{

	sg_free_table(sg);
	kfree(sg);
}

int
drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, const struct sg_table *sgt)
{

	/* XXX errno NetBSD->Linux */
	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs, sgt->sgt_pgs,
	    sgt->sgt_npgs);
}

int
drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct sg_table *sgt)
{
	bus_dma_segment_t *segs;
	bus_size_t size = drm_prime_sg_size(sgt);
	int nsegs = sgt->sgt_npgs;
	int ret;

	segs = kcalloc(sgt->sgt_npgs, sizeof(segs[0]), GFP_KERNEL);
	if (segs == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
	if (ret)
		goto out1;
	KASSERT(nsegs <= sgt->sgt_npgs);

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
	    BUS_DMA_NOWAIT);
	if (ret)
		goto out1;

out1:	kfree(segs);
out0:	return ret;
}

bool
drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
{
	unsigned i;

	for (i = 0; i < sgt->sgt_npgs; i++) {
		if (bus_dmatag_bounces_paddr(dmat, sgt->sgt_pgs[i]))
			return false;
	}
	return true;
}
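
/*
 * Illustrative NetBSD import path -- a sketch only, with a
 * hypothetical pre-created DMA map: check that every page of the
 * imported sg_table is reachable by the device's DMA tag, then load
 * the map from it:
 *
 *	if (!drm_prime_sg_importable(dmat, sgt))
 *		return -EIO;	(error code is the caller's choice)
 *	ret = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
 *	if (ret)
 *		return ret;
 */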

#else  /* !__NetBSD__ */

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 *
 * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_entries)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, index;
	dma_addr_t addr;

	index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(index >= max_entries))
				return -1;
			if (pages)
				pages[index] = page;
			if (addrs)
				addrs[index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
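
/*
 * Example use inside a &drm_driver.gem_prime_import_sg_table hook --
 * a hypothetical sketch, with "bo" the driver's buffer object:
 *
 *	npages = obj->size >> PAGE_SHIFT;
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages, NULL,
 *	    npages);
 *	if (ret < 0)
 *		goto fail;
 */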

#endif	/* __NetBSD__ */

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);