      1 /*	$NetBSD: drm_prime.c,v 1.10 2021/12/18 23:44:57 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright © 2012 Red Hat
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23  * IN THE SOFTWARE.
     24  *
     25  * Authors:
      26  *      Dave Airlie <airlied@redhat.com>
      27  *      Rob Clark <rob.clark@linaro.org>
     28  *
     29  */
     30 
     31 #include <sys/cdefs.h>
     32 __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.10 2021/12/18 23:44:57 riastradh Exp $");
     33 
     34 #include <linux/export.h>
     35 #include <linux/dma-buf.h>
     36 #include <linux/rbtree.h>
     37 
     38 #include <drm/drm.h>
     39 #include <drm/drm_drv.h>
     40 #include <drm/drm_file.h>
     41 #include <drm/drm_framebuffer.h>
     42 #include <drm/drm_gem.h>
     43 #include <drm/drm_prime.h>
     44 
     45 #include "drm_internal.h"
     46 
     47 #ifdef __NetBSD__
     48 
     49 #include <drm/bus_dma_hacks.h>
     50 
     51 #include <linux/nbsd-namespace.h>
     52 
     53 /*
     54  * We use struct sg_table just to pass around an array of pages from
     55  * one device to another in drm prime.  Since this is _not_ a complete
     56  * implementation of Linux's sg table abstraction (e.g., it does not
     57  * remember DMA addresses and RAM pages separately, and it doesn't
     58  * support the nested chained iteration of Linux scatterlists), we
     59  * isolate it to this file and make all callers go through a few extra
     60  * subroutines (drm_prime_sg_size, drm_prime_sg_free, &c.) to use it.
     61  * Don't use this outside drm prime!
     62  */
     63 
     64 struct sg_table {
     65 	paddr_t		*sgt_pgs;
     66 	unsigned	sgt_npgs;
     67 };
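         /*
          * Illustrative sketch, not part of this file: a NetBSD driver that
          * receives one of these sg_tables from the import path is expected to
          * consume it only through the drm_prime_* helpers near the end of this
          * file, roughly like the following (error handling elided; dmat, map,
          * size, error and ret are declared by the caller, and ret carries a
          * negative Linux-style error code):
          *
          *	size = drm_prime_sg_size(sgt);
          *	error = bus_dmamap_create(dmat, size, size >> PAGE_SHIFT,
          *	    PAGE_SIZE, 0, BUS_DMA_WAITOK, &map);
          *	if (error == 0)
          *		ret = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
          */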
     68 
     69 static int
     70 sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
     71     unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
     72 {
     73 	unsigned i;
     74 
     75 	KASSERT(offset == 0);
     76 	KASSERT(size == npages << PAGE_SHIFT);
     77 
     78 	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
     79 	if (sgt->sgt_pgs == NULL)
     80 		return -ENOMEM;
     81 	sgt->sgt_npgs = npages;
     82 
     83 	for (i = 0; i < npages; i++)
     84 		sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);
     85 
     86 	return 0;
     87 }
     88 
     89 static int
     90 sg_alloc_table_from_pglist(struct sg_table *sgt, const struct pglist *pglist,
     91     unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
     92 {
     93 	struct vm_page *pg;
     94 	unsigned i;
     95 
     96 	KASSERT(offset == 0);
     97 	KASSERT(size == npages << PAGE_SHIFT);
     98 
     99 	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
    100 	if (sgt->sgt_pgs == NULL)
    101 		return -ENOMEM;
    102 	sgt->sgt_npgs = npages;
    103 
    104 	i = 0;
    105 	TAILQ_FOREACH(pg, pglist, pageq.queue) {
    106 		KASSERT(i < npages);
     107 		sgt->sgt_pgs[i++] = VM_PAGE_TO_PHYS(pg);
    108 	}
    109 	KASSERT(i == npages);
    110 
    111 	return 0;
    112 }
    113 
    114 static int
    115 sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
    116     const bus_dma_segment_t *segs, int nsegs, gfp_t gfp)
    117 {
    118 	int ret;
    119 
    120 	KASSERT(nsegs > 0);
    121 	sgt->sgt_pgs = kcalloc(nsegs, sizeof(sgt->sgt_pgs[0]), gfp);
    122 	if (sgt->sgt_pgs == NULL)
    123 		return -ENOMEM;
    124 	sgt->sgt_npgs = nsegs;
    125 
    126 	/* XXX errno NetBSD->Linux */
    127 	ret = -bus_dmamem_export_pages(dmat, segs, nsegs, sgt->sgt_pgs,
    128 	    sgt->sgt_npgs);
     129 	if (ret) {
         		kfree(sgt->sgt_pgs);
         		sgt->sgt_pgs = NULL;
         		sgt->sgt_npgs = 0;
         		return ret;
     130 	}
    131 
    132 	return 0;
    133 }
    134 
    135 static void
    136 sg_free_table(struct sg_table *sgt)
    137 {
    138 
    139 	kfree(sgt->sgt_pgs);
    140 	sgt->sgt_pgs = NULL;
    141 	sgt->sgt_npgs = 0;
    142 }
    143 
    144 #endif	/* __NetBSD__ */
    145 
    146 /**
    147  * DOC: overview and lifetime rules
    148  *
    149  * Similar to GEM global names, PRIME file descriptors are also used to share
    150  * buffer objects across processes. They offer additional security: as file
    151  * descriptors must be explicitly sent over UNIX domain sockets to be shared
    152  * between applications, they can't be guessed like the globally unique GEM
    153  * names.
    154  *
    155  * Drivers that support the PRIME API implement the
    156  * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
    157  * GEM based drivers must use drm_gem_prime_handle_to_fd() and
    158  * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
     159  * actual driver interface is provided through the &drm_gem_object_funcs.export
    160  * and &drm_driver.gem_prime_import hooks.
    161  *
    162  * &dma_buf_ops implementations for GEM drivers are all individually exported
    163  * for drivers which need to overwrite or reimplement some of them.
    164  *
    165  * Reference Counting for GEM Drivers
    166  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    167  *
     168  * On export, the &dma_buf holds a reference to the exported buffer object,
    169  * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
    170  * IOCTL, when it first calls &drm_gem_object_funcs.export
    171  * and stores the exporting GEM object in the &dma_buf.priv field. This
    172  * reference needs to be released when the final reference to the &dma_buf
    173  * itself is dropped and its &dma_buf_ops.release function is called.  For
    174  * GEM-based drivers, the &dma_buf should be exported using
    175  * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
    176  *
    177  * Thus the chain of references always flows in one direction, avoiding loops:
     178  * importing GEM object -> dma-buf -> exported GEM bo. A further complication
     179  * is the lookup caches for import and export. These are required to guarantee
     180  * that any given object will always have only one unique userspace handle. This
    181  * is required to allow userspace to detect duplicated imports, since some GEM
    182  * drivers do fail command submissions if a given buffer object is listed more
    183  * than once. These import and export caches in &drm_prime_file_private only
    184  * retain a weak reference, which is cleaned up when the corresponding object is
    185  * released.
    186  *
    187  * Self-importing: If userspace is using PRIME as a replacement for flink then
    188  * it will get a fd->handle request for a GEM object that it created.  Drivers
     189  * should detect this situation and return the underlying object from the
    190  * dma-buf private. For GEM based drivers this is handled in
    191  * drm_gem_prime_import() already.
    192  */
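         /*
          * Illustrative sketch, not taken from any particular driver: a GEM
          * driver using these helpers typically wires the PRIME hooks up in its
          * &drm_driver like this (the foo_* names are hypothetical):
          *
          *	static struct drm_driver foo_driver = {
          *		.driver_features = DRIVER_GEM,
          *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
          *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
          *		.gem_prime_import = drm_gem_prime_import,
          *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
          *	};
          */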
    193 
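         /*
          * One dma_buf/handle pairing cached for a drm_file.  Each member is
          * linked into both per-file lookup trees: &drm_prime_file_private.dmabufs,
          * keyed by dma_buf pointer, and &drm_prime_file_private.handles, keyed
          * by GEM handle.
          */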
    194 struct drm_prime_member {
    195 	struct dma_buf *dma_buf;
    196 	uint32_t handle;
    197 
    198 	struct rb_node dmabuf_rb;
    199 	struct rb_node handle_rb;
    200 };
    201 
    202 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
    203 				    struct dma_buf *dma_buf, uint32_t handle)
    204 {
    205 	struct drm_prime_member *member;
    206 	struct rb_node **p, *rb;
    207 
    208 	member = kmalloc(sizeof(*member), GFP_KERNEL);
    209 	if (!member)
    210 		return -ENOMEM;
    211 
    212 	get_dma_buf(dma_buf);
    213 	member->dma_buf = dma_buf;
    214 	member->handle = handle;
    215 
    216 	rb = NULL;
    217 	p = &prime_fpriv->dmabufs.rb_node;
    218 	while (*p) {
    219 		struct drm_prime_member *pos;
    220 
    221 		rb = *p;
    222 		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
    223 		if (dma_buf > pos->dma_buf)
    224 			p = &rb->rb_right;
    225 		else
    226 			p = &rb->rb_left;
    227 	}
    228 	rb_link_node(&member->dmabuf_rb, rb, p);
    229 	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
    230 
    231 	rb = NULL;
    232 	p = &prime_fpriv->handles.rb_node;
    233 	while (*p) {
    234 		struct drm_prime_member *pos;
    235 
    236 		rb = *p;
    237 		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
    238 		if (handle > pos->handle)
    239 			p = &rb->rb_right;
    240 		else
    241 			p = &rb->rb_left;
    242 	}
    243 	rb_link_node(&member->handle_rb, rb, p);
    244 	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
    245 
    246 	return 0;
    247 }
    248 
    249 static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
    250 						      uint32_t handle)
    251 {
    252 	struct rb_node *rb;
    253 
    254 	rb = prime_fpriv->handles.rb_node;
    255 	while (rb) {
    256 		struct drm_prime_member *member;
    257 
    258 		member = rb_entry(rb, struct drm_prime_member, handle_rb);
    259 		if (member->handle == handle)
    260 			return member->dma_buf;
    261 		else if (member->handle < handle)
    262 			rb = rb->rb_right;
    263 		else
    264 			rb = rb->rb_left;
    265 	}
    266 
    267 	return NULL;
    268 }
    269 
    270 static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
    271 				       struct dma_buf *dma_buf,
    272 				       uint32_t *handle)
    273 {
    274 	struct rb_node *rb;
    275 
    276 	rb = prime_fpriv->dmabufs.rb_node;
    277 	while (rb) {
    278 		struct drm_prime_member *member;
    279 
    280 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
    281 		if (member->dma_buf == dma_buf) {
    282 			*handle = member->handle;
    283 			return 0;
    284 		} else if (member->dma_buf < dma_buf) {
    285 			rb = rb->rb_right;
    286 		} else {
    287 			rb = rb->rb_left;
    288 		}
    289 	}
    290 
    291 	return -ENOENT;
    292 }
    293 
    294 void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
    295 					struct dma_buf *dma_buf)
    296 {
    297 	struct rb_node *rb;
    298 
    299 	rb = prime_fpriv->dmabufs.rb_node;
    300 	while (rb) {
    301 		struct drm_prime_member *member;
    302 
    303 		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
    304 		if (member->dma_buf == dma_buf) {
    305 			rb_erase(&member->handle_rb, &prime_fpriv->handles);
    306 			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
    307 
    308 			dma_buf_put(dma_buf);
    309 			kfree(member);
    310 			return;
    311 		} else if (member->dma_buf < dma_buf) {
    312 			rb = rb->rb_right;
    313 		} else {
    314 			rb = rb->rb_left;
    315 		}
    317 	}
    318 }
    319 
    320 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
    321 {
    322 	mutex_init(&prime_fpriv->lock);
    323 	prime_fpriv->dmabufs = RB_ROOT;
    324 	prime_fpriv->handles = RB_ROOT;
    325 }
    326 
    327 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
    328 {
    329 	/* by now drm_gem_release should've made sure the list is empty */
    330 	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
    331 }
    332 
    333 /**
    334  * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
    335  * @dev: parent device for the exported dmabuf
    336  * @exp_info: the export information used by dma_buf_export()
    337  *
    338  * This wraps dma_buf_export() for use by generic GEM drivers that are using
    339  * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
    340  * a reference to the &drm_device and the exported &drm_gem_object (stored in
    341  * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
    342  *
    343  * Returns the new dmabuf.
    344  */
    345 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
    346 				      struct dma_buf_export_info *exp_info)
    347 {
    348 	struct drm_gem_object *obj = exp_info->priv;
    349 	struct dma_buf *dma_buf;
    350 
    351 	dma_buf = dma_buf_export(exp_info);
    352 	if (IS_ERR(dma_buf))
    353 		return dma_buf;
    354 
    355 	drm_dev_get(dev);
    356 	drm_gem_object_get(obj);
    357 	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
    358 
    359 	return dma_buf;
    360 }
    361 EXPORT_SYMBOL(drm_gem_dmabuf_export);
    362 
    363 /**
    364  * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
    365  * @dma_buf: buffer to be released
    366  *
    367  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
    368  * must use this in their &dma_buf_ops structure as the release callback.
    369  * drm_gem_dmabuf_release() should be used in conjunction with
    370  * drm_gem_dmabuf_export().
    371  */
    372 void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
    373 {
    374 	struct drm_gem_object *obj = dma_buf->priv;
    375 	struct drm_device *dev = obj->dev;
    376 
    377 	/* drop the reference on the export fd holds */
    378 	drm_gem_object_put_unlocked(obj);
    379 
    380 	drm_dev_put(dev);
    381 }
    382 EXPORT_SYMBOL(drm_gem_dmabuf_release);
    383 
    384 /**
    385  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
    386  * @dev: dev to export the buffer from
    387  * @file_priv: drm file-private structure
    388  * @prime_fd: fd id of the dma-buf which should be imported
    389  * @handle: pointer to storage for the handle of the imported buffer object
    390  *
     391  * This is the PRIME import function which GEM drivers must use to ensure
     392  * correct lifetime management of the underlying GEM object.
    393  * The actual importing of GEM object from the dma-buf is done through the
    394  * &drm_driver.gem_prime_import driver callback.
    395  *
    396  * Returns 0 on success or a negative error code on failure.
    397  */
    398 int drm_gem_prime_fd_to_handle(struct drm_device *dev,
    399 			       struct drm_file *file_priv, int prime_fd,
    400 			       uint32_t *handle)
    401 {
    402 	struct dma_buf *dma_buf;
    403 	struct drm_gem_object *obj;
    404 	int ret;
    405 
    406 	dma_buf = dma_buf_get(prime_fd);
    407 	if (IS_ERR(dma_buf))
    408 		return PTR_ERR(dma_buf);
    409 
    410 	mutex_lock(&file_priv->prime.lock);
    411 
    412 	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
    413 			dma_buf, handle);
    414 	if (ret == 0)
    415 		goto out_put;
    416 
    417 	/* never seen this one, need to import */
    418 	mutex_lock(&dev->object_name_lock);
    419 	if (dev->driver->gem_prime_import)
    420 		obj = dev->driver->gem_prime_import(dev, dma_buf);
    421 	else
    422 		obj = drm_gem_prime_import(dev, dma_buf);
    423 	if (IS_ERR(obj)) {
    424 		ret = PTR_ERR(obj);
    425 		goto out_unlock;
    426 	}
    427 
    428 	if (obj->dma_buf) {
    429 		WARN_ON(obj->dma_buf != dma_buf);
    430 	} else {
    431 		obj->dma_buf = dma_buf;
    432 		get_dma_buf(dma_buf);
    433 	}
    434 
    435 	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
    436 	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
    437 	drm_gem_object_put_unlocked(obj);
    438 	if (ret)
    439 		goto out_put;
    440 
    441 	ret = drm_prime_add_buf_handle(&file_priv->prime,
    442 			dma_buf, *handle);
    443 	mutex_unlock(&file_priv->prime.lock);
    444 	if (ret)
    445 		goto fail;
    446 
    447 	dma_buf_put(dma_buf);
    448 
    449 	return 0;
    450 
    451 fail:
    452 	/* hmm, if driver attached, we are relying on the free-object path
    453 	 * to detach.. which seems ok..
    454 	 */
    455 	drm_gem_handle_delete(file_priv, *handle);
    456 	dma_buf_put(dma_buf);
    457 	return ret;
    458 
    459 out_unlock:
    460 	mutex_unlock(&dev->object_name_lock);
    461 out_put:
    462 	mutex_unlock(&file_priv->prime.lock);
    463 	dma_buf_put(dma_buf);
    464 	return ret;
    465 }
    466 EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
    467 
    468 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
    469 				 struct drm_file *file_priv)
    470 {
    471 	struct drm_prime_handle *args = data;
    472 
    473 	if (!dev->driver->prime_fd_to_handle)
    474 		return -ENOSYS;
    475 
    476 	return dev->driver->prime_fd_to_handle(dev, file_priv,
    477 			args->fd, &args->handle);
    478 }
    479 
    480 static struct dma_buf *export_and_register_object(struct drm_device *dev,
    481 						  struct drm_gem_object *obj,
    482 						  uint32_t flags)
    483 {
    484 	struct dma_buf *dmabuf;
    485 
    486 	/* prevent races with concurrent gem_close. */
    487 	if (obj->handle_count == 0) {
    488 		dmabuf = ERR_PTR(-ENOENT);
    489 		return dmabuf;
    490 	}
    491 
    492 	if (obj->funcs && obj->funcs->export)
    493 		dmabuf = obj->funcs->export(obj, flags);
    494 	else if (dev->driver->gem_prime_export)
    495 		dmabuf = dev->driver->gem_prime_export(obj, flags);
    496 	else
    497 		dmabuf = drm_gem_prime_export(obj, flags);
    498 	if (IS_ERR(dmabuf)) {
    499 		/* normally the created dma-buf takes ownership of the ref,
    500 		 * but if that fails then drop the ref
    501 		 */
    502 		return dmabuf;
    503 	}
    504 
    505 	/*
    506 	 * Note that callers do not need to clean up the export cache
    507 	 * since the check for obj->handle_count guarantees that someone
    508 	 * will clean it up.
    509 	 */
    510 	obj->dma_buf = dmabuf;
    511 	get_dma_buf(obj->dma_buf);
    512 
    513 	return dmabuf;
    514 }
    515 
    516 /**
    517  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
    518  * @dev: dev to export the buffer from
    519  * @file_priv: drm file-private structure
    520  * @handle: buffer handle to export
    521  * @flags: flags like DRM_CLOEXEC
     522  * @prime_fd: pointer to storage for the fd id of the created dma-buf
    523  *
     524  * This is the PRIME export function which GEM drivers must use to ensure
     525  * correct lifetime management of the underlying GEM object.
    526  * The actual exporting from GEM object to a dma-buf is done through the
    527  * &drm_driver.gem_prime_export driver callback.
    528  */
    529 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
    530 			       struct drm_file *file_priv, uint32_t handle,
    531 			       uint32_t flags,
    532 			       int *prime_fd)
    533 {
    534 	struct drm_gem_object *obj;
    535 	int ret = 0;
    536 	struct dma_buf *dmabuf;
    537 
    538 	mutex_lock(&file_priv->prime.lock);
    539 	obj = drm_gem_object_lookup(file_priv, handle);
    540 	if (!obj)  {
    541 		ret = -ENOENT;
    542 		goto out_unlock;
    543 	}
    544 
    545 	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
    546 	if (dmabuf) {
    547 		get_dma_buf(dmabuf);
    548 		goto out_have_handle;
    549 	}
    550 
    551 	mutex_lock(&dev->object_name_lock);
    552 	/* re-export the original imported object */
    553 	if (obj->import_attach) {
    554 		dmabuf = obj->import_attach->dmabuf;
    555 		get_dma_buf(dmabuf);
    556 		goto out_have_obj;
    557 	}
    558 
    559 	if (obj->dma_buf) {
    560 		get_dma_buf(obj->dma_buf);
    561 		dmabuf = obj->dma_buf;
    562 		goto out_have_obj;
    563 	}
    564 
    565 	dmabuf = export_and_register_object(dev, obj, flags);
    566 	if (IS_ERR(dmabuf)) {
    567 		/* normally the created dma-buf takes ownership of the ref,
    568 		 * but if that fails then drop the ref
    569 		 */
    570 		ret = PTR_ERR(dmabuf);
    571 		mutex_unlock(&dev->object_name_lock);
    572 		goto out;
    573 	}
    574 
    575 out_have_obj:
    576 	/*
    577 	 * If we've exported this buffer then cheat and add it to the import list
    578 	 * so we get the correct handle back. We must do this under the
    579 	 * protection of dev->object_name_lock to ensure that a racing gem close
    580 	 * ioctl doesn't miss to remove this buffer handle from the cache.
    581 	 */
    582 	ret = drm_prime_add_buf_handle(&file_priv->prime,
    583 				       dmabuf, handle);
    584 	mutex_unlock(&dev->object_name_lock);
    585 	if (ret)
    586 		goto fail_put_dmabuf;
    587 
    588 out_have_handle:
    589 	ret = dma_buf_fd(dmabuf, flags);
    590 	/*
    591 	 * We must _not_ remove the buffer from the handle cache since the newly
    592 	 * created dma buf is already linked in the global obj->dma_buf pointer,
    593 	 * and that is invariant as long as a userspace gem handle exists.
    594 	 * Closing the handle will clean out the cache anyway, so we don't leak.
    595 	 */
    596 	if (ret < 0) {
    597 		goto fail_put_dmabuf;
    598 	} else {
    599 		*prime_fd = ret;
    600 		ret = 0;
    601 	}
    602 
    603 	goto out;
    604 
    605 fail_put_dmabuf:
    606 	dma_buf_put(dmabuf);
    607 out:
    608 	drm_gem_object_put_unlocked(obj);
    609 out_unlock:
    610 	mutex_unlock(&file_priv->prime.lock);
    611 
    612 	return ret;
    613 }
    614 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
    615 
    616 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
    617 				 struct drm_file *file_priv)
    618 {
    619 	struct drm_prime_handle *args = data;
    620 
    621 	if (!dev->driver->prime_handle_to_fd)
    622 		return -ENOSYS;
    623 
    624 	/* check flags are valid */
    625 	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
    626 		return -EINVAL;
    627 
    628 	return dev->driver->prime_handle_to_fd(dev, file_priv,
    629 			args->handle, args->flags, &args->fd);
    630 }
    631 
    632 /**
    633  * DOC: PRIME Helpers
    634  *
    635  * Drivers can implement &drm_gem_object_funcs.export and
    636  * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
    637  * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
    638  * implement dma-buf support in terms of some lower-level helpers, which are
    639  * again exported for drivers to use individually:
    640  *
    641  * Exporting buffers
    642  * ~~~~~~~~~~~~~~~~~
    643  *
    644  * Optional pinning of buffers is handled at dma-buf attach and detach time in
    645  * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
     646  * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
    647  * &drm_gem_object_funcs.get_sg_table.
    648  *
    649  * For kernel-internal access there's drm_gem_dmabuf_vmap() and
    650  * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
    651  * drm_gem_dmabuf_mmap().
    652  *
    653  * Note that these export helpers can only be used if the underlying backing
    654  * storage is fully coherent and either permanently pinned, or it is safe to pin
    655  * it indefinitely.
    656  *
    657  * FIXME: The underlying helper functions are named rather inconsistently.
    658  *
     659  * Importing buffers
    660  * ~~~~~~~~~~~~~~~~~
    661  *
    662  * Importing dma-bufs using drm_gem_prime_import() relies on
    663  * &drm_driver.gem_prime_import_sg_table.
    664  *
     665  * Note that, similarly to the export helpers, this permanently pins the
     666  * underlying backing storage, which is ok for scanout but not the best
     667  * option for sharing lots of buffers for rendering.
    668  */
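         /*
          * Illustrative sketch (the foo_* names are hypothetical): a driver
          * whose backing storage satisfies the constraints above can typically
          * lean entirely on the helpers in this file:
          *
          *	static const struct drm_gem_object_funcs foo_gem_funcs = {
          *		.free = foo_gem_free_object,
          *		.export = drm_gem_prime_export,
          *		.get_sg_table = foo_gem_get_sg_table,
          *		.vmap = foo_gem_vmap,
          *		.vunmap = foo_gem_vunmap,
          *	};
          */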
    669 
    670 /**
    671  * drm_gem_map_attach - dma_buf attach implementation for GEM
    672  * @dma_buf: buffer to attach device to
    673  * @attach: buffer attachment data
    674  *
    675  * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
    676  * used as the &dma_buf_ops.attach callback. Must be used together with
    677  * drm_gem_map_detach().
    678  *
    679  * Returns 0 on success, negative error code on failure.
    680  */
    681 int drm_gem_map_attach(struct dma_buf *dma_buf,
    682 		       struct dma_buf_attachment *attach)
    683 {
    684 	struct drm_gem_object *obj = dma_buf->priv;
    685 
    686 	return drm_gem_pin(obj);
    687 }
    688 EXPORT_SYMBOL(drm_gem_map_attach);
    689 
    690 /**
    691  * drm_gem_map_detach - dma_buf detach implementation for GEM
    692  * @dma_buf: buffer to detach from
    693  * @attach: attachment to be detached
    694  *
     695  * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
    696  * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
    697  * &dma_buf_ops.detach callback.
    698  */
    699 void drm_gem_map_detach(struct dma_buf *dma_buf,
    700 			struct dma_buf_attachment *attach)
    701 {
    702 	struct drm_gem_object *obj = dma_buf->priv;
    703 
    704 	drm_gem_unpin(obj);
    705 }
    706 EXPORT_SYMBOL(drm_gem_map_detach);
    707 
    708 /**
    709  * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
    710  * @attach: attachment whose scatterlist is to be returned
    711  * @dir: direction of DMA transfer
    712  *
    713  * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
    714  * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
    715  * with drm_gem_unmap_dma_buf().
    716  *
     717  * Returns: the sg_table containing the scatterlist, or ERR_PTR on error.
     718  * May return -EINTR if it is interrupted by a signal.
    719  */
    720 struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
    721 				     enum dma_data_direction dir)
    722 {
    723 	struct drm_gem_object *obj = attach->dmabuf->priv;
    724 	struct sg_table *sgt;
    725 
    726 	if (WARN_ON(dir == DMA_NONE))
    727 		return ERR_PTR(-EINVAL);
    728 
    729 	if (obj->funcs)
    730 		sgt = obj->funcs->get_sg_table(obj);
    731 	else
    732 		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
    733 
    734 	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
    735 			      DMA_ATTR_SKIP_CPU_SYNC)) {
    736 		sg_free_table(sgt);
    737 		kfree(sgt);
    738 		sgt = ERR_PTR(-ENOMEM);
    739 	}
    740 
    741 	return sgt;
    742 }
    743 EXPORT_SYMBOL(drm_gem_map_dma_buf);
    744 
    745 /**
    746  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
    747  * @attach: attachment to unmap buffer from
    748  * @sgt: scatterlist info of the buffer to unmap
    749  * @dir: direction of DMA transfer
    750  *
    751  * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
    752  */
    753 void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
    754 			   struct sg_table *sgt,
    755 			   enum dma_data_direction dir)
    756 {
    757 	if (!sgt)
    758 		return;
    759 
    760 	dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
    761 			   DMA_ATTR_SKIP_CPU_SYNC);
    762 	sg_free_table(sgt);
    763 	kfree(sgt);
    764 }
    765 EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
    766 
    767 /**
    768  * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
    769  * @dma_buf: buffer to be mapped
    770  *
    771  * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
    772  * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
    773  *
    774  * Returns the kernel virtual address or NULL on failure.
    775  */
    776 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
    777 {
    778 	struct drm_gem_object *obj = dma_buf->priv;
    779 	void *vaddr;
    780 
    781 	vaddr = drm_gem_vmap(obj);
    782 	if (IS_ERR(vaddr))
    783 		vaddr = NULL;
    784 
    785 	return vaddr;
    786 }
    787 EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
    788 
    789 /**
    790  * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
    791  * @dma_buf: buffer to be unmapped
    792  * @vaddr: the virtual address of the buffer
    793  *
    794  * Releases a kernel virtual mapping. This can be used as the
    795  * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
    796  */
    797 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
    798 {
    799 	struct drm_gem_object *obj = dma_buf->priv;
    800 
    801 	drm_gem_vunmap(obj, vaddr);
    802 }
    803 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
    804 
    805 /**
    806  * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
    807  * @obj: GEM object
    808  * @vma: Virtual address range
    809  *
    810  * This function sets up a userspace mapping for PRIME exported buffers using
    811  * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
    812  * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
    813  * called to set up the mapping.
    814  *
    815  * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
    816  */
    817 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
    818 {
    819 	struct drm_file *priv;
    820 	struct file *fil;
    821 	int ret;
    822 
    823 	/* Add the fake offset */
    824 	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
    825 
    826 	if (obj->funcs && obj->funcs->mmap) {
    827 		ret = obj->funcs->mmap(obj, vma);
    828 		if (ret)
    829 			return ret;
    830 		vma->vm_private_data = obj;
    831 		drm_gem_object_get(obj);
    832 		return 0;
    833 	}
    834 
    835 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    836 	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
    837 	if (!priv || !fil) {
    838 		ret = -ENOMEM;
    839 		goto out;
    840 	}
    841 
    842 	/* Used by drm_gem_mmap() to lookup the GEM object */
    843 	priv->minor = obj->dev->primary;
    844 	fil->private_data = priv;
    845 
    846 	ret = drm_vma_node_allow(&obj->vma_node, priv);
    847 	if (ret)
    848 		goto out;
    849 
    850 	ret = obj->dev->driver->fops->mmap(fil, vma);
    851 
    852 	drm_vma_node_revoke(&obj->vma_node, priv);
    853 out:
    854 	kfree(priv);
    855 	kfree(fil);
    856 
    857 	return ret;
    858 }
    859 EXPORT_SYMBOL(drm_gem_prime_mmap);
    860 
    861 /**
    862  * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
    863  * @dma_buf: buffer to be mapped
    864  * @vma: virtual address range
    865  *
    866  * Provides memory mapping for the buffer. This can be used as the
    867  * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
    868  * which should be set to drm_gem_prime_mmap().
    869  *
    870  * FIXME: There's really no point to this wrapper, drivers which need anything
    871  * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
    872  *
    873  * Returns 0 on success or a negative error code on failure.
    874  */
    875 #ifdef __NetBSD__
    876 static int
    877 drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    878     int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    879     int *maxprotp)
    880 #else
    881 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
    882 #endif
    883 {
    884 	struct drm_gem_object *obj = dma_buf->priv;
    885 	struct drm_device *dev = obj->dev;
    886 
    887 	if (!dev->driver->gem_prime_mmap)
    888 		return -ENOSYS;
    889 
    890 #ifdef __NetBSD__
    891 	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
    892 	    advicep, uobjp, maxprotp);
    893 #else
    894 	return dev->driver->gem_prime_mmap(obj, vma);
    895 #endif
    896 }
    897 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
    898 
    899 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
    900 	.cache_sgt_mapping = true,
    901 	.attach = drm_gem_map_attach,
    902 	.detach = drm_gem_map_detach,
    903 	.map_dma_buf = drm_gem_map_dma_buf,
    904 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
    905 	.release = drm_gem_dmabuf_release,
    906 	.mmap = drm_gem_dmabuf_mmap,
    907 	.vmap = drm_gem_dmabuf_vmap,
    908 	.vunmap = drm_gem_dmabuf_vunmap,
    909 };
    910 
    911 /**
    912  * drm_prime_pages_to_sg - converts a page array into an sg list
    913  * @pages: pointer to the array of page pointers to convert
    914  * @nr_pages: length of the page vector
    915  *
     916  * This helper creates an sg table object from a set of pages. The driver
     917  * is responsible for mapping the pages into the importer's address space
     918  * for use with dma_buf itself.
    919  *
    920  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
    921  */
    922 struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
    923 {
    924 	struct sg_table *sg = NULL;
    925 	int ret;
    926 
    927 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    928 	if (!sg) {
    929 		ret = -ENOMEM;
    930 		goto out;
    931 	}
    932 
    933 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
    934 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
    935 	if (ret)
    936 		goto out;
    937 
    938 	return sg;
    939 out:
    940 	kfree(sg);
    941 	return ERR_PTR(ret);
    942 }
    943 EXPORT_SYMBOL(drm_prime_pages_to_sg);
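         /*
          * Illustrative sketch of such a &drm_gem_object_funcs.get_sg_table
          * implementation (the foo_* names and the bo->pages array are
          * hypothetical; the pages are assumed to be pinned already):
          *
          *	static struct sg_table *
          *	foo_gem_get_sg_table(struct drm_gem_object *obj)
          *	{
          *		struct foo_gem_object *bo = to_foo_gem_object(obj);
          *
          *		return drm_prime_pages_to_sg(bo->pages,
          *		    obj->size >> PAGE_SHIFT);
          *	}
          */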
    944 
    945 /**
    946  * drm_gem_prime_export - helper library implementation of the export callback
    947  * @obj: GEM object to export
    948  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
    949  *
     950  * This is the implementation of the &drm_gem_object_funcs.export function
     951  * for GEM drivers using the PRIME helpers. It is used as the default in
     952  * drm_gem_prime_handle_to_fd().
    953  */
    954 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
    955 				     int flags)
    956 {
    957 	struct drm_device *dev = obj->dev;
    958 	struct dma_buf_export_info exp_info = {
    959 #ifndef __NetBSD__
    960 		.exp_name = KBUILD_MODNAME, /* white lie for debug */
    961 		.owner = dev->driver->fops->owner,
    962 #endif
    963 		.ops = &drm_gem_prime_dmabuf_ops,
    964 		.size = obj->size,
    965 		.flags = flags,
    966 		.priv = obj,
    967 		.resv = obj->resv,
    968 	};
    969 
    970 	return drm_gem_dmabuf_export(dev, &exp_info);
    971 }
    972 EXPORT_SYMBOL(drm_gem_prime_export);
    973 
    974 /**
    975  * drm_gem_prime_import_dev - core implementation of the import callback
    976  * @dev: drm_device to import into
    977  * @dma_buf: dma-buf object to import
    978  * @attach_dev: struct device to dma_buf attach
    979  *
    980  * This is the core of drm_gem_prime_import(). It's designed to be called by
    981  * drivers who want to use a different device structure than &drm_device.dev for
    982  * attaching via dma_buf. This function calls
    983  * &drm_driver.gem_prime_import_sg_table internally.
    984  *
    985  * Drivers must arrange to call drm_prime_gem_destroy() from their
    986  * &drm_gem_object_funcs.free hook when using this function.
    987  */
    988 struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
    989 					    struct dma_buf *dma_buf,
    990 					    struct device *attach_dev)
    991 {
    992 	struct dma_buf_attachment *attach;
    993 	struct sg_table *sgt;
    994 	struct drm_gem_object *obj;
    995 	int ret;
    996 
    997 	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
    998 		obj = dma_buf->priv;
    999 		if (obj->dev == dev) {
   1000 			/*
    1001 			 * Importing dmabuf exported from our own gem increases
   1002 			 * refcount on gem itself instead of f_count of dmabuf.
   1003 			 */
   1004 			drm_gem_object_get(obj);
   1005 			return obj;
   1006 		}
   1007 	}
   1008 
   1009 	if (!dev->driver->gem_prime_import_sg_table)
   1010 		return ERR_PTR(-EINVAL);
   1011 
   1012 	attach = dma_buf_attach(dma_buf, attach_dev);
   1013 	if (IS_ERR(attach))
   1014 		return ERR_CAST(attach);
   1015 
   1016 	get_dma_buf(dma_buf);
   1017 
   1018 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
   1019 	if (IS_ERR(sgt)) {
   1020 		ret = PTR_ERR(sgt);
   1021 		goto fail_detach;
   1022 	}
   1023 
   1024 	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
   1025 	if (IS_ERR(obj)) {
   1026 		ret = PTR_ERR(obj);
   1027 		goto fail_unmap;
   1028 	}
   1029 
   1030 	obj->import_attach = attach;
   1031 	obj->resv = dma_buf->resv;
   1032 
   1033 	return obj;
   1034 
   1035 fail_unmap:
   1036 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
   1037 fail_detach:
   1038 	dma_buf_detach(dma_buf, attach);
   1039 	dma_buf_put(dma_buf);
   1040 
   1041 	return ERR_PTR(ret);
   1042 }
   1043 EXPORT_SYMBOL(drm_gem_prime_import_dev);
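         /*
          * Illustrative sketch (foo_* names hypothetical): a driver whose DMA
          * device differs from &drm_device.dev can wrap this as its
          * &drm_driver.gem_prime_import implementation:
          *
          *	static struct drm_gem_object *
          *	foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
          *	{
          *		struct foo_device *foo = to_foo_device(dev);
          *
          *		return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
          *	}
          */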
   1044 
   1045 /**
   1046  * drm_gem_prime_import - helper library implementation of the import callback
   1047  * @dev: drm_device to import into
   1048  * @dma_buf: dma-buf object to import
   1049  *
    1050  * This is the implementation of the gem_prime_import function for GEM drivers
   1051  * using the PRIME helpers. Drivers can use this as their
   1052  * &drm_driver.gem_prime_import implementation. It is used as the default
   1053  * implementation in drm_gem_prime_fd_to_handle().
   1054  *
   1055  * Drivers must arrange to call drm_prime_gem_destroy() from their
   1056  * &drm_gem_object_funcs.free hook when using this function.
   1057  */
   1058 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
   1059 					    struct dma_buf *dma_buf)
   1060 {
   1061 	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
   1062 }
   1063 EXPORT_SYMBOL(drm_gem_prime_import);
   1064 
   1066 #ifdef __NetBSD__
   1067 
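         /*
          * Wrap the physical pages backing the nsegs bus_dma segments in segs[]
          * in a newly allocated sg_table.  Returns an ERR_PTR-encoded negative
          * Linux error code on failure.
          */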
   1068 struct sg_table *
   1069 drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
   1070     int nsegs)
   1071 {
   1072 	struct sg_table *sg;
   1073 	int ret;
   1074 
   1075 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
   1076 	if (sg == NULL) {
   1077 		ret = -ENOMEM;
   1078 		goto out;
   1079 	}
   1080 
   1081 	ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
   1082 	    GFP_KERNEL);
   1083 	if (ret)
   1084 		goto out;
   1085 
   1086 	return sg;
   1087 out:
   1088 	kfree(sg);
   1089 	return ERR_PTR(ret);
   1090 }
   1091 
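         /*
          * Wrap the npages pages on pglist in a newly allocated sg_table.  The
          * list must contain exactly npages pages.  Returns an ERR_PTR-encoded
          * negative Linux error code on failure.
          */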
   1092 struct sg_table *
   1093 drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
   1094 {
   1095 	struct sg_table *sg;
   1096 	int ret;
   1097 
   1098 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
   1099 	if (sg == NULL) {
   1100 		ret = -ENOMEM;
   1101 		goto out;
   1102 	}
   1103 
    1104 	ret = sg_alloc_table_from_pglist(sg, pglist, npages, 0,
    1105 	    npages << PAGE_SHIFT, GFP_KERNEL);
   1106 	if (ret)
   1107 		goto out;
   1108 
   1109 	return sg;
   1110 
   1111 out:
   1112 	kfree(sg);
   1113 	return ERR_PTR(ret);
   1114 }
   1115 
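         /*
          * Return the total size in bytes of the pages recorded in sg.
          */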
   1116 bus_size_t
   1117 drm_prime_sg_size(struct sg_table *sg)
   1118 {
   1119 
   1120 	return sg->sgt_npgs << PAGE_SHIFT;
   1121 }
   1122 
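         /*
          * Free an sg_table obtained from drm_prime_pages_to_sg,
          * drm_prime_bus_dmamem_to_sg, or drm_prime_pglist_to_sg.
          */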
   1123 void
   1124 drm_prime_sg_free(struct sg_table *sg)
   1125 {
   1126 
   1127 	sg_free_table(sg);
   1128 	kfree(sg);
   1129 }
   1130 
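         /*
          * Import the pages of sgt as up to nsegs bus_dma segments in segs[],
          * storing the number actually used in *rsegs.  Returns a negative
          * Linux error code on failure.
          */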
   1131 int
   1132 drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
   1133     int nsegs, int *rsegs, const struct sg_table *sgt)
   1134 {
   1135 
   1136 	/* XXX errno NetBSD->Linux */
   1137 	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs, sgt->sgt_pgs,
   1138 	    sgt->sgt_npgs);
   1139 }
   1140 
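         /*
          * Load map with the pages of sgt, as if by bus_dmamap_load_raw.
          * Returns a negative Linux error code on failure.
          */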
   1141 int
   1142 drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
   1143     struct sg_table *sgt)
   1144 {
   1145 	bus_dma_segment_t *segs;
   1146 	bus_size_t size = drm_prime_sg_size(sgt);
   1147 	int nsegs = sgt->sgt_npgs;
   1148 	int ret;
   1149 
   1150 	segs = kcalloc(sgt->sgt_npgs, sizeof(segs[0]), GFP_KERNEL);
   1151 	if (segs == NULL) {
   1152 		ret = -ENOMEM;
   1153 		goto out0;
   1154 	}
   1155 
   1156 	ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
   1157 	if (ret)
   1158 		goto out1;
   1159 	KASSERT(nsegs <= sgt->sgt_npgs);
   1160 
   1161 	/* XXX errno NetBSD->Linux */
   1162 	ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
   1163 	    BUS_DMA_NOWAIT);
   1164 	if (ret)
   1165 		goto out1;
   1166 
   1167 out1:	kfree(segs);
   1168 out0:	return ret;
   1169 }
   1170 
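         /*
          * Return true if every page of sgt can be addressed directly by dmat,
          * i.e., if loading it into a map for dmat will not require bounce
          * buffers.
          */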
   1171 bool
   1172 drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
   1173 {
   1174 	unsigned i;
   1175 
   1176 	for (i = 0; i < sgt->sgt_npgs; i++) {
   1177 		if (bus_dmatag_bounces_paddr(dmat, sgt->sgt_pgs[i]))
   1178 			return false;
   1179 	}
   1180 	return true;
   1181 }
   1182 
   1183 #else  /* !__NetBSD__ */
   1184 
   1185 /**
   1186  * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
   1187  * @sgt: scatter-gather table to convert
   1188  * @pages: optional array of page pointers to store the page array in
   1189  * @addrs: optional array to store the dma bus address of each page
   1190  * @max_entries: size of both the passed-in arrays
   1191  *
   1192  * Exports an sg table into an array of pages and addresses. This is currently
   1193  * required by the TTM driver in order to do correct fault handling.
   1194  *
   1195  * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
   1196  * implementation.
   1197  */
   1198 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
   1199 				     dma_addr_t *addrs, int max_entries)
   1200 {
   1201 	unsigned count;
   1202 	struct scatterlist *sg;
   1203 	struct page *page;
   1204 	u32 len, index;
   1205 	dma_addr_t addr;
   1206 
   1207 	index = 0;
   1208 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
   1209 		len = sg->length;
   1210 		page = sg_page(sg);
   1211 		addr = sg_dma_address(sg);
   1212 
   1213 		while (len > 0) {
   1214 			if (WARN_ON(index >= max_entries))
   1215 				return -1;
   1216 			if (pages)
   1217 				pages[index] = page;
   1218 			if (addrs)
   1219 				addrs[index] = addr;
   1220 
   1221 			page++;
   1222 			addr += PAGE_SIZE;
   1223 			len -= PAGE_SIZE;
   1224 			index++;
   1225 		}
   1226 	}
   1227 	return 0;
   1228 }
   1229 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
   1230 
   1231 #endif	/* __NetBSD__ */
   1232 
   1233 /**
   1234  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
   1235  * @obj: GEM object which was created from a dma-buf
   1236  * @sg: the sg-table which was pinned at import time
   1237  *
   1238  * This is the cleanup functions which GEM drivers need to call when they use
   1239  * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
   1240  */
   1241 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
   1242 {
   1243 	struct dma_buf_attachment *attach;
   1244 	struct dma_buf *dma_buf;
   1245 	attach = obj->import_attach;
   1246 	if (sg)
   1247 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
   1248 	dma_buf = attach->dmabuf;
   1249 	dma_buf_detach(attach->dmabuf, attach);
   1250 	/* remove the reference */
   1251 	dma_buf_put(dma_buf);
   1252 }
   1253 EXPORT_SYMBOL(drm_prime_gem_destroy);
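         /*
          * Illustrative sketch (the foo_* names and the cached bo->sgt pointer
          * are hypothetical): a driver's &drm_gem_object_funcs.free hook for
          * objects that may have been imported would look roughly like:
          *
          *	static void foo_gem_free_object(struct drm_gem_object *obj)
          *	{
          *		struct foo_gem_object *bo = to_foo_gem_object(obj);
          *
          *		if (obj->import_attach)
          *			drm_prime_gem_destroy(obj, bo->sgt);
          *		drm_gem_object_release(obj);
          *		kfree(bo);
          *	}
          */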
   1254