      1  1.7.6.1        ad /*	$NetBSD: drm_prime.c,v 1.7.6.1 2020/02/29 20:20:13 ad Exp $	*/
      2      1.2  riastrad 
      3      1.1  riastrad /*
      4      1.1  riastrad  * Copyright © 2012 Red Hat
      5      1.1  riastrad  *
      6      1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
      7      1.1  riastrad  * copy of this software and associated documentation files (the "Software"),
      8      1.1  riastrad  * to deal in the Software without restriction, including without limitation
      9      1.1  riastrad  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10      1.1  riastrad  * and/or sell copies of the Software, and to permit persons to whom the
     11      1.1  riastrad  * Software is furnished to do so, subject to the following conditions:
     12      1.1  riastrad  *
     13      1.1  riastrad  * The above copyright notice and this permission notice (including the next
     14      1.1  riastrad  * paragraph) shall be included in all copies or substantial portions of the
     15      1.1  riastrad  * Software.
     16      1.1  riastrad  *
     17      1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18      1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19      1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20      1.1  riastrad  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21      1.1  riastrad  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22      1.1  riastrad  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23      1.1  riastrad  * IN THE SOFTWARE.
     24      1.1  riastrad  *
     25      1.1  riastrad  * Authors:
      26      1.1  riastrad  *      Dave Airlie <airlied@redhat.com>
      27      1.1  riastrad  *      Rob Clark <rob.clark@linaro.org>
     28      1.1  riastrad  *
     29      1.1  riastrad  */
     30      1.1  riastrad 
     31      1.2  riastrad #include <sys/cdefs.h>
     32  1.7.6.1        ad __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.7.6.1 2020/02/29 20:20:13 ad Exp $");
     33      1.2  riastrad 
     34      1.1  riastrad #include <linux/export.h>
     35      1.1  riastrad #include <linux/dma-buf.h>
     36      1.1  riastrad #include <drm/drmP.h>
     37      1.2  riastrad #include <drm/drm_gem.h>
     38      1.2  riastrad 
     39      1.2  riastrad #include "drm_internal.h"
     40      1.1  riastrad 
     41      1.4  riastrad #ifdef __NetBSD__
     42      1.4  riastrad 
     43      1.5  riastrad #include <drm/bus_dma_hacks.h>
     44      1.5  riastrad 
     45  1.7.6.1        ad #include <linux/nbsd-namespace.h>
     46  1.7.6.1        ad 
     47      1.4  riastrad /*
     48      1.5  riastrad  * We use struct sg_table just to pass around an array of pages from
     49      1.5  riastrad  * one device to another in drm prime.  Since this is _not_ a complete
     50      1.5  riastrad  * implementation of Linux's sg table abstraction (e.g., it does not
     51      1.5  riastrad  * remember DMA addresses and RAM pages separately, and it doesn't
     52      1.5  riastrad  * support the nested chained iteration of Linux scatterlists), we
     53      1.5  riastrad  * isolate it to this file and make all callers go through a few extra
     54      1.5  riastrad  * subroutines (drm_prime_sg_size, drm_prime_sg_free, &c.) to use it.
     55      1.5  riastrad  * Don't use this outside drm prime!
     56      1.4  riastrad  */
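/*
 * Rough usage sketch (illustrative only; the table, tag, and map names
 * below are placeholders): an exporter builds a table with one of the
 * drm_prime_*_to_sg helpers in this file, an importer consumes it via
 * drm_prime_sg_to_bus_dmamem or drm_prime_bus_dmamap_load_sgt, and the
 * creator releases it with drm_prime_sg_free:
 *
 *	sgt = drm_prime_pglist_to_sg(pglist, npages);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	error = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
 *	...
 *	drm_prime_sg_free(sgt);
 */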
     57      1.4  riastrad 
     58      1.3  riastrad struct sg_table {
     59      1.6  riastrad 	paddr_t		*sgt_pgs;
     60      1.5  riastrad 	unsigned	sgt_npgs;
     61      1.3  riastrad };
     62      1.3  riastrad 
     63      1.3  riastrad static int
     64      1.3  riastrad sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
     65      1.3  riastrad     unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
     66      1.3  riastrad {
     67      1.3  riastrad 	unsigned i;
     68      1.3  riastrad 
     69      1.3  riastrad 	KASSERT(offset == 0);
     70      1.3  riastrad 	KASSERT(size == npages << PAGE_SHIFT);
     71      1.3  riastrad 
     72      1.5  riastrad 	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
     73      1.5  riastrad 	if (sgt->sgt_pgs == NULL)
     74      1.3  riastrad 		return -ENOMEM;
     75      1.5  riastrad 	sgt->sgt_npgs = npages;
     76      1.3  riastrad 
     77      1.5  riastrad 	for (i = 0; i < npages; i++)
     78      1.6  riastrad 		sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);
     79      1.3  riastrad 
     80      1.3  riastrad 	return 0;
     81      1.3  riastrad }
     82      1.3  riastrad 
     83      1.3  riastrad static int
     84      1.5  riastrad sg_alloc_table_from_pglist(struct sg_table *sgt, const struct pglist *pglist,
     85      1.3  riastrad     unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
     86      1.3  riastrad {
     87      1.3  riastrad 	struct vm_page *pg;
     88      1.3  riastrad 	unsigned i;
     89      1.3  riastrad 
     90      1.3  riastrad 	KASSERT(offset == 0);
     91      1.3  riastrad 	KASSERT(size == npages << PAGE_SHIFT);
     92      1.3  riastrad 
     93      1.5  riastrad 	sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
     94      1.5  riastrad 	if (sgt->sgt_pgs == NULL)
     95      1.3  riastrad 		return -ENOMEM;
     96      1.5  riastrad 	sgt->sgt_npgs = npages;
     97      1.3  riastrad 
     98      1.3  riastrad 	i = 0;
     99      1.3  riastrad 	TAILQ_FOREACH(pg, pglist, pageq.queue) {
    100      1.3  riastrad 		KASSERT(i < npages);
     101      1.6  riastrad 		sgt->sgt_pgs[i++] = VM_PAGE_TO_PHYS(pg);
    102      1.3  riastrad 	}
    103      1.3  riastrad 	KASSERT(i == npages);
    104      1.3  riastrad 
    105      1.3  riastrad 	return 0;
    106      1.3  riastrad }
    107      1.3  riastrad 
    108      1.4  riastrad static int
    109      1.5  riastrad sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
    110      1.5  riastrad     const bus_dma_segment_t *segs, int nsegs, gfp_t gfp)
    111      1.4  riastrad {
    112      1.5  riastrad 	int ret;
    113      1.4  riastrad 
    114      1.4  riastrad 	KASSERT(nsegs > 0);
    115      1.5  riastrad 	sgt->sgt_pgs = kcalloc(nsegs, sizeof(sgt->sgt_pgs[0]), gfp);
    116      1.5  riastrad 	if (sgt->sgt_pgs == NULL)
    117      1.4  riastrad 		return -ENOMEM;
    118      1.5  riastrad 	sgt->sgt_npgs = nsegs;
    119      1.4  riastrad 
    120      1.5  riastrad 	/* XXX errno NetBSD->Linux */
    121      1.5  riastrad 	ret = -bus_dmamem_export_pages(dmat, segs, nsegs, sgt->sgt_pgs,
    122      1.5  riastrad 	    sgt->sgt_npgs);
    123      1.5  riastrad 	if (ret)
    124      1.5  riastrad 		return ret;
    125      1.4  riastrad 
    126      1.4  riastrad 	return 0;
    127      1.4  riastrad }
    128      1.4  riastrad 
    129      1.3  riastrad static void
    130      1.3  riastrad sg_free_table(struct sg_table *sgt)
    131      1.3  riastrad {
    132      1.3  riastrad 
    133      1.5  riastrad 	kfree(sgt->sgt_pgs);
    134      1.5  riastrad 	sgt->sgt_pgs = NULL;
    135      1.5  riastrad 	sgt->sgt_npgs = 0;
    136      1.3  riastrad }
    137      1.3  riastrad 
    138      1.4  riastrad #endif	/* __NetBSD__ */
    139      1.4  riastrad 
    140      1.1  riastrad /*
    141      1.1  riastrad  * DMA-BUF/GEM Object references and lifetime overview:
    142      1.1  riastrad  *
    143      1.1  riastrad  * On the export the dma_buf holds a reference to the exporting GEM
    144      1.1  riastrad  * object. It takes this reference in handle_to_fd_ioctl, when it
    145      1.1  riastrad  * first calls .prime_export and stores the exporting GEM object in
    146      1.1  riastrad  * the dma_buf priv. This reference is released when the dma_buf
    147      1.1  riastrad  * object goes away in the driver .release function.
    148      1.1  riastrad  *
    149      1.1  riastrad  * On the import the importing GEM object holds a reference to the
    150      1.1  riastrad  * dma_buf (which in turn holds a ref to the exporting GEM object).
    151      1.1  riastrad  * It takes that reference in the fd_to_handle ioctl.
    152      1.1  riastrad  * It calls dma_buf_get, creates an attachment to it and stores the
    153      1.1  riastrad  * attachment in the GEM object. When this attachment is destroyed
    154      1.1  riastrad  * when the imported object is destroyed, we remove the attachment
    155      1.1  riastrad  * and drop the reference to the dma_buf.
    156      1.1  riastrad  *
    157      1.1  riastrad  * Thus the chain of references always flows in one direction
    158      1.1  riastrad  * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
    159      1.1  riastrad  *
    160      1.1  riastrad  * Self-importing: if userspace is using PRIME as a replacement for flink
    161      1.1  riastrad  * then it will get a fd->handle request for a GEM object that it created.
     162      1.1  riastrad  * Drivers should detect this situation and return the GEM object
    163      1.2  riastrad  * from the dma-buf private.  Prime will do this automatically for drivers that
    164      1.2  riastrad  * use the drm_gem_prime_{import,export} helpers.
    165      1.1  riastrad  */
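/*
 * A minimal sketch of the self-import check described above (this is
 * what the drm_gem_prime_import helper below already does for drivers
 * that use it):
 *
 *	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		if (obj->dev == dev) {
 *			drm_gem_object_reference(obj);
 *			return obj;
 *		}
 *	}
 */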
    166      1.1  riastrad 
    167      1.1  riastrad struct drm_prime_member {
    168      1.1  riastrad 	struct list_head entry;
    169      1.1  riastrad 	struct dma_buf *dma_buf;
    170      1.1  riastrad 	uint32_t handle;
    171      1.1  riastrad };
    172      1.1  riastrad 
    173      1.2  riastrad struct drm_prime_attachment {
    174      1.2  riastrad 	struct sg_table *sgt;
    175      1.2  riastrad 	enum dma_data_direction dir;
    176      1.2  riastrad };
    177      1.2  riastrad 
    178      1.2  riastrad static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
    179      1.2  riastrad 				    struct dma_buf *dma_buf, uint32_t handle)
    180      1.2  riastrad {
    181      1.2  riastrad 	struct drm_prime_member *member;
    182      1.2  riastrad 
    183      1.2  riastrad 	member = kmalloc(sizeof(*member), GFP_KERNEL);
    184      1.2  riastrad 	if (!member)
    185      1.2  riastrad 		return -ENOMEM;
    186      1.2  riastrad 
    187      1.2  riastrad 	get_dma_buf(dma_buf);
    188      1.2  riastrad 	member->dma_buf = dma_buf;
    189      1.2  riastrad 	member->handle = handle;
    190      1.2  riastrad 	list_add(&member->entry, &prime_fpriv->head);
    191      1.2  riastrad 	return 0;
    192      1.2  riastrad }
    193      1.2  riastrad 
    194      1.2  riastrad static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
    195      1.2  riastrad 						      uint32_t handle)
    196      1.2  riastrad {
    197      1.2  riastrad 	struct drm_prime_member *member;
    198      1.2  riastrad 
    199      1.2  riastrad 	list_for_each_entry(member, &prime_fpriv->head, entry) {
    200      1.2  riastrad 		if (member->handle == handle)
    201      1.2  riastrad 			return member->dma_buf;
    202      1.2  riastrad 	}
    203      1.2  riastrad 
    204      1.2  riastrad 	return NULL;
    205      1.2  riastrad }
    206      1.2  riastrad 
    207      1.2  riastrad static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
    208      1.2  riastrad 				       struct dma_buf *dma_buf,
    209      1.2  riastrad 				       uint32_t *handle)
    210      1.2  riastrad {
    211      1.2  riastrad 	struct drm_prime_member *member;
    212      1.2  riastrad 
    213      1.2  riastrad 	list_for_each_entry(member, &prime_fpriv->head, entry) {
    214      1.2  riastrad 		if (member->dma_buf == dma_buf) {
    215      1.2  riastrad 			*handle = member->handle;
    216      1.2  riastrad 			return 0;
    217      1.2  riastrad 		}
    218      1.2  riastrad 	}
    219      1.2  riastrad 	return -ENOENT;
    220      1.2  riastrad }
    221      1.2  riastrad 
    222      1.2  riastrad static int drm_gem_map_attach(struct dma_buf *dma_buf,
    223      1.2  riastrad 			      struct device *target_dev,
    224      1.2  riastrad 			      struct dma_buf_attachment *attach)
    225      1.2  riastrad {
    226      1.2  riastrad 	struct drm_prime_attachment *prime_attach;
    227      1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    228      1.2  riastrad 	struct drm_device *dev = obj->dev;
    229      1.2  riastrad 
    230      1.2  riastrad 	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
    231      1.2  riastrad 	if (!prime_attach)
    232      1.2  riastrad 		return -ENOMEM;
    233      1.2  riastrad 
    234      1.2  riastrad 	prime_attach->dir = DMA_NONE;
    235      1.2  riastrad 	attach->priv = prime_attach;
    236      1.2  riastrad 
    237      1.2  riastrad 	if (!dev->driver->gem_prime_pin)
    238      1.2  riastrad 		return 0;
    239      1.2  riastrad 
    240      1.2  riastrad 	return dev->driver->gem_prime_pin(obj);
    241      1.2  riastrad }
    242      1.2  riastrad 
    243      1.2  riastrad static void drm_gem_map_detach(struct dma_buf *dma_buf,
    244      1.2  riastrad 			       struct dma_buf_attachment *attach)
    245      1.2  riastrad {
    246      1.2  riastrad 	struct drm_prime_attachment *prime_attach = attach->priv;
    247      1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    248      1.2  riastrad 	struct drm_device *dev = obj->dev;
    249      1.2  riastrad 	struct sg_table *sgt;
    250      1.2  riastrad 
    251      1.2  riastrad 	if (dev->driver->gem_prime_unpin)
    252      1.2  riastrad 		dev->driver->gem_prime_unpin(obj);
    253      1.2  riastrad 
    254      1.2  riastrad 	if (!prime_attach)
    255      1.2  riastrad 		return;
    256      1.2  riastrad 
    257      1.2  riastrad 	sgt = prime_attach->sgt;
    258      1.2  riastrad 	if (sgt) {
    259      1.3  riastrad #ifndef __NetBSD__		/* We map/unmap elsewhere.  */
    260      1.2  riastrad 		if (prime_attach->dir != DMA_NONE)
    261      1.2  riastrad 			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
    262      1.2  riastrad 					prime_attach->dir);
    263      1.3  riastrad #endif
    264      1.2  riastrad 		sg_free_table(sgt);
    265      1.2  riastrad 	}
    266      1.2  riastrad 
    267      1.2  riastrad 	kfree(sgt);
    268      1.2  riastrad 	kfree(prime_attach);
    269      1.2  riastrad 	attach->priv = NULL;
    270      1.2  riastrad }
    271      1.2  riastrad 
    272      1.2  riastrad void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
    273      1.2  riastrad 					struct dma_buf *dma_buf)
    274      1.2  riastrad {
    275      1.2  riastrad 	struct drm_prime_member *member, *safe;
    276      1.2  riastrad 
    277      1.2  riastrad 	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
    278      1.2  riastrad 		if (member->dma_buf == dma_buf) {
    279      1.2  riastrad 			dma_buf_put(dma_buf);
    280      1.2  riastrad 			list_del(&member->entry);
    281      1.2  riastrad 			kfree(member);
    282      1.2  riastrad 		}
    283      1.2  riastrad 	}
    284      1.2  riastrad }
    285      1.2  riastrad 
    286      1.2  riastrad static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
    287  1.7.6.1        ad 					    enum dma_data_direction dir)
    288      1.2  riastrad {
    289      1.2  riastrad 	struct drm_prime_attachment *prime_attach = attach->priv;
    290      1.2  riastrad 	struct drm_gem_object *obj = attach->dmabuf->priv;
    291      1.2  riastrad 	struct sg_table *sgt;
    292      1.2  riastrad 
    293      1.2  riastrad 	if (WARN_ON(dir == DMA_NONE || !prime_attach))
    294      1.2  riastrad 		return ERR_PTR(-EINVAL);
    295      1.2  riastrad 
    296      1.2  riastrad 	/* return the cached mapping when possible */
    297      1.2  riastrad 	if (prime_attach->dir == dir)
    298      1.2  riastrad 		return prime_attach->sgt;
    299      1.2  riastrad 
    300      1.2  riastrad 	/*
    301      1.2  riastrad 	 * two mappings with different directions for the same attachment are
    302      1.2  riastrad 	 * not allowed
    303      1.2  riastrad 	 */
    304      1.2  riastrad 	if (WARN_ON(prime_attach->dir != DMA_NONE))
    305      1.2  riastrad 		return ERR_PTR(-EBUSY);
    306      1.2  riastrad 
    307      1.2  riastrad 	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
    308      1.2  riastrad 	if (!IS_ERR(sgt)) {
    309      1.3  riastrad #ifdef __NetBSD__		/* We map/unmap elsewhere.  */
    310      1.3  riastrad 		prime_attach->sgt = sgt;
    311      1.3  riastrad 		prime_attach->dir = dir;
    312      1.3  riastrad #else
    313      1.2  riastrad 		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
    314      1.2  riastrad 			sg_free_table(sgt);
    315      1.2  riastrad 			kfree(sgt);
    316      1.2  riastrad 			sgt = ERR_PTR(-ENOMEM);
    317      1.2  riastrad 		} else {
    318      1.2  riastrad 			prime_attach->sgt = sgt;
    319      1.2  riastrad 			prime_attach->dir = dir;
    320      1.2  riastrad 		}
    321      1.3  riastrad #endif
    322      1.2  riastrad 	}
    323      1.2  riastrad 
    324      1.2  riastrad 	return sgt;
    325      1.2  riastrad }
    326      1.2  riastrad 
    327      1.2  riastrad static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
    328      1.2  riastrad 				  struct sg_table *sgt,
    329      1.2  riastrad 				  enum dma_data_direction dir)
    330      1.2  riastrad {
    331      1.2  riastrad 	/* nothing to be done here */
    332      1.2  riastrad }
    333      1.2  riastrad 
    334      1.2  riastrad /**
    335      1.2  riastrad  * drm_gem_dmabuf_release - dma_buf release implementation for GEM
    336      1.2  riastrad  * @dma_buf: buffer to be released
    337      1.2  riastrad  *
    338      1.2  riastrad  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
    339      1.2  riastrad  * must use this in their dma_buf ops structure as the release callback.
    340      1.2  riastrad  */
    341      1.2  riastrad void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
    342      1.2  riastrad {
    343      1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    344      1.2  riastrad 
    345      1.2  riastrad 	/* drop the reference on the export fd holds */
     346      1.2  riastrad 	/* drop the reference the export fd holds */
    347      1.2  riastrad }
    348      1.2  riastrad EXPORT_SYMBOL(drm_gem_dmabuf_release);
    349      1.2  riastrad 
    350      1.2  riastrad static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
    351      1.2  riastrad {
    352      1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    353      1.2  riastrad 	struct drm_device *dev = obj->dev;
    354      1.2  riastrad 
    355      1.2  riastrad 	return dev->driver->gem_prime_vmap(obj);
    356      1.2  riastrad }
    357      1.2  riastrad 
    358      1.2  riastrad static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
    359      1.2  riastrad {
    360      1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    361      1.2  riastrad 	struct drm_device *dev = obj->dev;
    362      1.2  riastrad 
    363      1.2  riastrad 	dev->driver->gem_prime_vunmap(obj, vaddr);
    364      1.2  riastrad }
    365      1.2  riastrad 
    366      1.2  riastrad static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
    367      1.2  riastrad 					unsigned long page_num)
    368      1.2  riastrad {
    369      1.2  riastrad 	return NULL;
    370      1.2  riastrad }
    371      1.2  riastrad 
    372      1.2  riastrad static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
    373      1.2  riastrad 					 unsigned long page_num, void *addr)
    374      1.2  riastrad {
    375      1.2  riastrad 
    376      1.2  riastrad }
    377      1.2  riastrad static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
    378      1.2  riastrad 				 unsigned long page_num)
    379      1.2  riastrad {
    380      1.2  riastrad 	return NULL;
    381      1.2  riastrad }
    382      1.2  riastrad 
    383      1.2  riastrad static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
    384      1.2  riastrad 				  unsigned long page_num, void *addr)
    385      1.2  riastrad {
    386      1.2  riastrad 
    387      1.2  riastrad }
    388      1.2  riastrad 
    389      1.3  riastrad #ifdef __NetBSD__
    390      1.3  riastrad static int
    391      1.3  riastrad drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    392      1.3  riastrad     int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    393      1.3  riastrad     int *maxprotp)
    394      1.3  riastrad #else
    395      1.2  riastrad static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
    396      1.2  riastrad 			       struct vm_area_struct *vma)
    397      1.3  riastrad #endif
    398      1.2  riastrad {
    399      1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    400      1.2  riastrad 	struct drm_device *dev = obj->dev;
    401      1.2  riastrad 
    402      1.2  riastrad 	if (!dev->driver->gem_prime_mmap)
    403      1.2  riastrad 		return -ENOSYS;
    404      1.2  riastrad 
    405      1.3  riastrad #ifdef __NetBSD__
    406      1.3  riastrad 	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
    407      1.3  riastrad 	    advicep, uobjp, maxprotp);
    408      1.3  riastrad #else
    409      1.2  riastrad 	return dev->driver->gem_prime_mmap(obj, vma);
    410      1.3  riastrad #endif
    411      1.2  riastrad }
    412      1.2  riastrad 
    413      1.2  riastrad static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
    414      1.2  riastrad 	.attach = drm_gem_map_attach,
    415      1.2  riastrad 	.detach = drm_gem_map_detach,
    416      1.2  riastrad 	.map_dma_buf = drm_gem_map_dma_buf,
    417      1.2  riastrad 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
    418      1.2  riastrad 	.release = drm_gem_dmabuf_release,
    419      1.2  riastrad 	.kmap = drm_gem_dmabuf_kmap,
    420      1.2  riastrad 	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
    421      1.2  riastrad 	.kunmap = drm_gem_dmabuf_kunmap,
    422      1.2  riastrad 	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
    423      1.2  riastrad 	.mmap = drm_gem_dmabuf_mmap,
    424      1.2  riastrad 	.vmap = drm_gem_dmabuf_vmap,
    425      1.2  riastrad 	.vunmap = drm_gem_dmabuf_vunmap,
    426      1.2  riastrad };
    427      1.2  riastrad 
    428      1.2  riastrad /**
    429      1.2  riastrad  * DOC: PRIME Helpers
    430      1.2  riastrad  *
    431      1.2  riastrad  * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
    432      1.2  riastrad  * simpler APIs by using the helper functions @drm_gem_prime_export and
    433      1.2  riastrad  * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
    434      1.2  riastrad  * six lower-level driver callbacks:
    435      1.2  riastrad  *
    436      1.2  riastrad  * Export callbacks:
    437      1.2  riastrad  *
    438      1.2  riastrad  *  - @gem_prime_pin (optional): prepare a GEM object for exporting
    439      1.2  riastrad  *
    440      1.2  riastrad  *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
    441      1.2  riastrad  *
    442      1.2  riastrad  *  - @gem_prime_vmap: vmap a buffer exported by your driver
    443      1.2  riastrad  *
    444      1.2  riastrad  *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
    445      1.2  riastrad  *
    446      1.2  riastrad  *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
    447      1.2  riastrad  *
    448      1.2  riastrad  * Import callback:
    449      1.2  riastrad  *
    450      1.2  riastrad  *  - @gem_prime_import_sg_table (import): produce a GEM object from another
    451      1.2  riastrad  *    driver's scatter/gather table
    452      1.2  riastrad  */
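/*
 * Wiring sketch (hypothetical "foo" driver, not taken from this file):
 * a driver using these helpers typically plugs them into its struct
 * drm_driver alongside its own lower-level callbacks:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export = drm_gem_prime_export,
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_pin = foo_gem_prime_pin,
 *		.gem_prime_get_sg_table = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap = foo_gem_prime_vmap,
 *		.gem_prime_vunmap = foo_gem_prime_vunmap,
 *	};
 */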
    453      1.2  riastrad 
    454      1.2  riastrad /**
    455      1.2  riastrad  * drm_gem_prime_export - helper library implementation of the export callback
    456      1.2  riastrad  * @dev: drm_device to export from
    457      1.2  riastrad  * @obj: GEM object to export
    458      1.2  riastrad  * @flags: flags like DRM_CLOEXEC
    459      1.2  riastrad  *
    460      1.2  riastrad  * This is the implementation of the gem_prime_export functions for GEM drivers
    461      1.2  riastrad  * using the PRIME helpers.
    462      1.2  riastrad  */
    463      1.2  riastrad struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
    464      1.2  riastrad 				     struct drm_gem_object *obj,
    465      1.2  riastrad 				     int flags)
    466      1.2  riastrad {
    467      1.2  riastrad 	struct dma_buf_export_info exp_info = {
    468      1.3  riastrad #ifndef __NetBSD__
    469      1.2  riastrad 		.exp_name = KBUILD_MODNAME, /* white lie for debug */
    470      1.2  riastrad 		.owner = dev->driver->fops->owner,
    471      1.3  riastrad #endif
    472      1.2  riastrad 		.ops = &drm_gem_prime_dmabuf_ops,
    473      1.2  riastrad 		.size = obj->size,
    474      1.2  riastrad 		.flags = flags,
    475      1.2  riastrad 		.priv = obj,
    476      1.2  riastrad 	};
    477      1.2  riastrad 
    478      1.2  riastrad 	if (dev->driver->gem_prime_res_obj)
    479      1.2  riastrad 		exp_info.resv = dev->driver->gem_prime_res_obj(obj);
    480      1.2  riastrad 
    481      1.2  riastrad 	return dma_buf_export(&exp_info);
    482      1.2  riastrad }
    483      1.2  riastrad EXPORT_SYMBOL(drm_gem_prime_export);
    484      1.2  riastrad 
    485      1.2  riastrad static struct dma_buf *export_and_register_object(struct drm_device *dev,
    486      1.2  riastrad 						  struct drm_gem_object *obj,
    487      1.2  riastrad 						  uint32_t flags)
    488      1.2  riastrad {
    489      1.2  riastrad 	struct dma_buf *dmabuf;
    490      1.2  riastrad 
    491      1.2  riastrad 	/* prevent races with concurrent gem_close. */
    492      1.2  riastrad 	if (obj->handle_count == 0) {
    493      1.2  riastrad 		dmabuf = ERR_PTR(-ENOENT);
    494      1.2  riastrad 		return dmabuf;
    495      1.2  riastrad 	}
    496      1.2  riastrad 
    497      1.2  riastrad 	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
    498      1.2  riastrad 	if (IS_ERR(dmabuf)) {
    499      1.2  riastrad 		/* normally the created dma-buf takes ownership of the ref,
    500      1.2  riastrad 		 * but if that fails then drop the ref
    501      1.2  riastrad 		 */
    502      1.2  riastrad 		return dmabuf;
    503      1.2  riastrad 	}
    504      1.2  riastrad 
    505      1.2  riastrad 	/*
    506      1.2  riastrad 	 * Note that callers do not need to clean up the export cache
    507      1.2  riastrad 	 * since the check for obj->handle_count guarantees that someone
    508      1.2  riastrad 	 * will clean it up.
    509      1.2  riastrad 	 */
    510      1.2  riastrad 	obj->dma_buf = dmabuf;
    511      1.2  riastrad 	get_dma_buf(obj->dma_buf);
     512      1.2  riastrad 	/* Grab a new ref since the caller's ref is now used by the dma-buf */
    513      1.2  riastrad 	drm_gem_object_reference(obj);
    514      1.2  riastrad 
    515      1.2  riastrad 	return dmabuf;
    516      1.2  riastrad }
    517      1.2  riastrad 
    518      1.2  riastrad /**
    519      1.2  riastrad  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
    520      1.2  riastrad  * @dev: dev to export the buffer from
    521      1.2  riastrad  * @file_priv: drm file-private structure
    522      1.2  riastrad  * @handle: buffer handle to export
    523      1.2  riastrad  * @flags: flags like DRM_CLOEXEC
     524      1.2  riastrad  * @prime_fd: pointer to storage for the fd id of the created dma-buf
    525      1.2  riastrad  *
     526      1.2  riastrad  * This is the PRIME export function which GEM drivers must use to ensure
     527      1.2  riastrad  * correct lifetime management of the underlying GEM object.
    528      1.2  riastrad  * The actual exporting from GEM object to a dma-buf is done through the
    529      1.2  riastrad  * gem_prime_export driver callback.
    530      1.2  riastrad  */
    531      1.1  riastrad int drm_gem_prime_handle_to_fd(struct drm_device *dev,
    532      1.2  riastrad 			       struct drm_file *file_priv, uint32_t handle,
    533      1.2  riastrad 			       uint32_t flags,
    534      1.2  riastrad 			       int *prime_fd)
    535      1.1  riastrad {
    536      1.1  riastrad 	struct drm_gem_object *obj;
    537      1.2  riastrad 	int ret = 0;
    538      1.2  riastrad 	struct dma_buf *dmabuf;
    539      1.1  riastrad 
    540      1.2  riastrad 	mutex_lock(&file_priv->prime.lock);
    541      1.1  riastrad 	obj = drm_gem_object_lookup(dev, file_priv, handle);
    542      1.2  riastrad 	if (!obj)  {
    543      1.2  riastrad 		ret = -ENOENT;
    544      1.2  riastrad 		goto out_unlock;
    545      1.2  riastrad 	}
    546      1.2  riastrad 
    547      1.2  riastrad 	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
    548      1.2  riastrad 	if (dmabuf) {
    549      1.2  riastrad 		get_dma_buf(dmabuf);
    550      1.2  riastrad 		goto out_have_handle;
    551      1.2  riastrad 	}
    552      1.1  riastrad 
    553      1.2  riastrad 	mutex_lock(&dev->object_name_lock);
    554      1.1  riastrad 	/* re-export the original imported object */
    555      1.1  riastrad 	if (obj->import_attach) {
    556      1.2  riastrad 		dmabuf = obj->import_attach->dmabuf;
    557      1.2  riastrad 		get_dma_buf(dmabuf);
    558      1.2  riastrad 		goto out_have_obj;
    559      1.2  riastrad 	}
    560      1.2  riastrad 
    561      1.2  riastrad 	if (obj->dma_buf) {
    562      1.2  riastrad 		get_dma_buf(obj->dma_buf);
    563      1.2  riastrad 		dmabuf = obj->dma_buf;
    564      1.2  riastrad 		goto out_have_obj;
    565      1.2  riastrad 	}
    566      1.2  riastrad 
    567      1.2  riastrad 	dmabuf = export_and_register_object(dev, obj, flags);
    568      1.2  riastrad 	if (IS_ERR(dmabuf)) {
    569      1.2  riastrad 		/* normally the created dma-buf takes ownership of the ref,
    570      1.2  riastrad 		 * but if that fails then drop the ref
    571      1.2  riastrad 		 */
    572      1.2  riastrad 		ret = PTR_ERR(dmabuf);
    573      1.2  riastrad 		mutex_unlock(&dev->object_name_lock);
    574      1.2  riastrad 		goto out;
    575      1.1  riastrad 	}
    576      1.1  riastrad 
    577      1.2  riastrad out_have_obj:
    578      1.2  riastrad 	/*
    579      1.2  riastrad 	 * If we've exported this buffer then cheat and add it to the import list
    580      1.2  riastrad 	 * so we get the correct handle back. We must do this under the
    581      1.2  riastrad 	 * protection of dev->object_name_lock to ensure that a racing gem close
     582      1.2  riastrad 	 * ioctl doesn't fail to remove this buffer handle from the cache.
    583      1.2  riastrad 	 */
    584      1.2  riastrad 	ret = drm_prime_add_buf_handle(&file_priv->prime,
    585      1.2  riastrad 				       dmabuf, handle);
    586      1.2  riastrad 	mutex_unlock(&dev->object_name_lock);
    587      1.2  riastrad 	if (ret)
    588      1.2  riastrad 		goto fail_put_dmabuf;
    589      1.2  riastrad 
    590      1.2  riastrad out_have_handle:
    591      1.2  riastrad 	ret = dma_buf_fd(dmabuf, flags);
    592      1.2  riastrad 	/*
    593      1.2  riastrad 	 * We must _not_ remove the buffer from the handle cache since the newly
    594      1.2  riastrad 	 * created dma buf is already linked in the global obj->dma_buf pointer,
    595      1.2  riastrad 	 * and that is invariant as long as a userspace gem handle exists.
    596      1.2  riastrad 	 * Closing the handle will clean out the cache anyway, so we don't leak.
    597      1.2  riastrad 	 */
    598      1.2  riastrad 	if (ret < 0) {
    599      1.2  riastrad 		goto fail_put_dmabuf;
    600      1.1  riastrad 	} else {
    601      1.2  riastrad 		*prime_fd = ret;
    602      1.2  riastrad 		ret = 0;
    603      1.2  riastrad 	}
    604      1.2  riastrad 
    605      1.2  riastrad 	goto out;
    606      1.2  riastrad 
    607      1.2  riastrad fail_put_dmabuf:
    608      1.2  riastrad 	dma_buf_put(dmabuf);
    609      1.2  riastrad out:
    610      1.2  riastrad 	drm_gem_object_unreference_unlocked(obj);
    611      1.2  riastrad out_unlock:
    612      1.2  riastrad 	mutex_unlock(&file_priv->prime.lock);
    613      1.2  riastrad 
    614      1.2  riastrad 	return ret;
    615      1.2  riastrad }
    616      1.2  riastrad EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
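/*
 * Userspace reaches this path through DRM_IOCTL_PRIME_HANDLE_TO_FD;
 * roughly (error handling elided; drm_fd and handle are placeholders):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == 0)
 *		prime_fd = args.fd;
 */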
    617      1.2  riastrad 
    618      1.2  riastrad /**
    619      1.2  riastrad  * drm_gem_prime_import - helper library implementation of the import callback
    620      1.2  riastrad  * @dev: drm_device to import into
    621      1.2  riastrad  * @dma_buf: dma-buf object to import
    622      1.2  riastrad  *
    623      1.2  riastrad  * This is the implementation of the gem_prime_import functions for GEM drivers
    624      1.2  riastrad  * using the PRIME helpers.
    625      1.2  riastrad  */
    626      1.2  riastrad struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
    627      1.2  riastrad 					    struct dma_buf *dma_buf)
    628      1.2  riastrad {
    629      1.2  riastrad 	struct dma_buf_attachment *attach;
    630      1.2  riastrad 	struct sg_table *sgt;
    631      1.2  riastrad 	struct drm_gem_object *obj;
    632      1.2  riastrad 	int ret;
    633      1.2  riastrad 
    634      1.2  riastrad 	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
    635      1.2  riastrad 		obj = dma_buf->priv;
    636      1.2  riastrad 		if (obj->dev == dev) {
    637      1.2  riastrad 			/*
     638      1.2  riastrad 			 * Importing dmabuf exported from our own gem increases
    639      1.2  riastrad 			 * refcount on gem itself instead of f_count of dmabuf.
    640      1.1  riastrad 			 */
    641      1.2  riastrad 			drm_gem_object_reference(obj);
    642      1.2  riastrad 			return obj;
    643      1.1  riastrad 		}
    644      1.1  riastrad 	}
    645      1.2  riastrad 
    646      1.2  riastrad 	if (!dev->driver->gem_prime_import_sg_table)
    647      1.2  riastrad 		return ERR_PTR(-EINVAL);
    648      1.2  riastrad 
    649      1.2  riastrad 	attach = dma_buf_attach(dma_buf, dev->dev);
    650      1.2  riastrad 	if (IS_ERR(attach))
    651      1.2  riastrad 		return ERR_CAST(attach);
    652      1.2  riastrad 
    653      1.2  riastrad 	get_dma_buf(dma_buf);
    654      1.2  riastrad 
    655      1.2  riastrad 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    656      1.2  riastrad 	if (IS_ERR(sgt)) {
    657      1.2  riastrad 		ret = PTR_ERR(sgt);
    658      1.2  riastrad 		goto fail_detach;
    659      1.2  riastrad 	}
    660      1.2  riastrad 
    661      1.2  riastrad 	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
    662      1.2  riastrad 	if (IS_ERR(obj)) {
    663      1.2  riastrad 		ret = PTR_ERR(obj);
    664      1.2  riastrad 		goto fail_unmap;
    665      1.1  riastrad 	}
    666      1.1  riastrad 
    667      1.2  riastrad 	obj->import_attach = attach;
    668      1.2  riastrad 
    669      1.2  riastrad 	return obj;
    670      1.2  riastrad 
    671      1.2  riastrad fail_unmap:
    672      1.2  riastrad 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    673      1.2  riastrad fail_detach:
    674      1.2  riastrad 	dma_buf_detach(dma_buf, attach);
    675      1.2  riastrad 	dma_buf_put(dma_buf);
    676      1.2  riastrad 
    677      1.2  riastrad 	return ERR_PTR(ret);
    678      1.1  riastrad }
    679      1.2  riastrad EXPORT_SYMBOL(drm_gem_prime_import);
    680      1.1  riastrad 
    681      1.2  riastrad /**
    682      1.2  riastrad  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
    683      1.2  riastrad  * @dev: dev to export the buffer from
    684      1.2  riastrad  * @file_priv: drm file-private structure
    685      1.2  riastrad  * @prime_fd: fd id of the dma-buf which should be imported
    686      1.2  riastrad  * @handle: pointer to storage for the handle of the imported buffer object
    687      1.2  riastrad  *
     688      1.2  riastrad  * This is the PRIME import function which GEM drivers must use to ensure
     689      1.2  riastrad  * correct lifetime management of the underlying GEM object.
     690      1.2  riastrad  * The actual importing of the GEM object from the dma-buf is done through
     691      1.2  riastrad  * the gem_prime_import driver callback.
    692      1.2  riastrad  */
    693      1.1  riastrad int drm_gem_prime_fd_to_handle(struct drm_device *dev,
    694      1.2  riastrad 			       struct drm_file *file_priv, int prime_fd,
    695      1.2  riastrad 			       uint32_t *handle)
    696      1.1  riastrad {
    697      1.1  riastrad 	struct dma_buf *dma_buf;
    698      1.1  riastrad 	struct drm_gem_object *obj;
    699      1.1  riastrad 	int ret;
    700      1.1  riastrad 
    701      1.1  riastrad 	dma_buf = dma_buf_get(prime_fd);
    702      1.1  riastrad 	if (IS_ERR(dma_buf))
    703      1.1  riastrad 		return PTR_ERR(dma_buf);
    704      1.1  riastrad 
    705      1.1  riastrad 	mutex_lock(&file_priv->prime.lock);
    706      1.1  riastrad 
    707      1.2  riastrad 	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
    708      1.1  riastrad 			dma_buf, handle);
    709      1.2  riastrad 	if (ret == 0)
    710      1.1  riastrad 		goto out_put;
    711      1.1  riastrad 
    712      1.1  riastrad 	/* never seen this one, need to import */
    713      1.2  riastrad 	mutex_lock(&dev->object_name_lock);
    714      1.1  riastrad 	obj = dev->driver->gem_prime_import(dev, dma_buf);
    715      1.1  riastrad 	if (IS_ERR(obj)) {
    716      1.1  riastrad 		ret = PTR_ERR(obj);
    717      1.2  riastrad 		goto out_unlock;
    718      1.2  riastrad 	}
    719      1.2  riastrad 
    720      1.2  riastrad 	if (obj->dma_buf) {
    721      1.2  riastrad 		WARN_ON(obj->dma_buf != dma_buf);
    722      1.2  riastrad 	} else {
    723      1.2  riastrad 		obj->dma_buf = dma_buf;
    724      1.2  riastrad 		get_dma_buf(dma_buf);
    725      1.1  riastrad 	}
    726      1.1  riastrad 
    727      1.2  riastrad 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
    728      1.2  riastrad 	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
    729      1.1  riastrad 	drm_gem_object_unreference_unlocked(obj);
    730      1.1  riastrad 	if (ret)
    731      1.1  riastrad 		goto out_put;
    732      1.1  riastrad 
    733      1.2  riastrad 	ret = drm_prime_add_buf_handle(&file_priv->prime,
    734      1.1  riastrad 			dma_buf, *handle);
    735      1.1  riastrad 	if (ret)
    736      1.1  riastrad 		goto fail;
    737      1.1  riastrad 
    738      1.1  riastrad 	mutex_unlock(&file_priv->prime.lock);
    739      1.2  riastrad 
    740      1.2  riastrad 	dma_buf_put(dma_buf);
    741      1.2  riastrad 
    742      1.1  riastrad 	return 0;
    743      1.1  riastrad 
    744      1.1  riastrad fail:
    745      1.1  riastrad 	/* hmm, if driver attached, we are relying on the free-object path
    746      1.1  riastrad 	 * to detach.. which seems ok..
    747      1.1  riastrad 	 */
    748      1.2  riastrad 	drm_gem_handle_delete(file_priv, *handle);
    749      1.2  riastrad out_unlock:
    750      1.2  riastrad 	mutex_unlock(&dev->object_name_lock);
    751      1.1  riastrad out_put:
    752      1.1  riastrad 	dma_buf_put(dma_buf);
    753      1.1  riastrad 	mutex_unlock(&file_priv->prime.lock);
    754      1.1  riastrad 	return ret;
    755      1.1  riastrad }
    756      1.1  riastrad EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
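/*
 * The matching userspace import path goes through
 * DRM_IOCTL_PRIME_FD_TO_HANDLE; roughly (placeholder names, error
 * handling elided):
 *
 *	struct drm_prime_handle args = {
 *		.fd = prime_fd,
 *	};
 *	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) == 0)
 *		handle = args.handle;
 */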
    757      1.1  riastrad 
    758      1.1  riastrad int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
    759      1.1  riastrad 				 struct drm_file *file_priv)
    760      1.1  riastrad {
    761      1.1  riastrad 	struct drm_prime_handle *args = data;
    762      1.1  riastrad 	uint32_t flags;
    763      1.1  riastrad 
    764      1.1  riastrad 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
    765      1.1  riastrad 		return -EINVAL;
    766      1.1  riastrad 
    767      1.1  riastrad 	if (!dev->driver->prime_handle_to_fd)
    768      1.1  riastrad 		return -ENOSYS;
    769      1.1  riastrad 
    770      1.1  riastrad 	/* check flags are valid */
    771      1.1  riastrad 	if (args->flags & ~DRM_CLOEXEC)
    772      1.1  riastrad 		return -EINVAL;
    773      1.1  riastrad 
    774      1.1  riastrad 	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
    775      1.1  riastrad 	flags = args->flags & DRM_CLOEXEC;
    776      1.1  riastrad 
    777      1.1  riastrad 	return dev->driver->prime_handle_to_fd(dev, file_priv,
    778      1.1  riastrad 			args->handle, flags, &args->fd);
    779      1.1  riastrad }
    780      1.1  riastrad 
    781      1.1  riastrad int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
    782      1.1  riastrad 				 struct drm_file *file_priv)
    783      1.1  riastrad {
    784      1.1  riastrad 	struct drm_prime_handle *args = data;
    785      1.1  riastrad 
    786      1.1  riastrad 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
    787      1.1  riastrad 		return -EINVAL;
    788      1.1  riastrad 
    789      1.1  riastrad 	if (!dev->driver->prime_fd_to_handle)
    790      1.1  riastrad 		return -ENOSYS;
    791      1.1  riastrad 
    792      1.1  riastrad 	return dev->driver->prime_fd_to_handle(dev, file_priv,
    793      1.1  riastrad 			args->fd, &args->handle);
    794      1.1  riastrad }
    795      1.1  riastrad 
    796      1.2  riastrad /**
    797      1.2  riastrad  * drm_prime_pages_to_sg - converts a page array into an sg list
    798      1.2  riastrad  * @pages: pointer to the array of page pointers to convert
    799      1.2  riastrad  * @nr_pages: length of the page vector
    800      1.1  riastrad  *
     801      1.2  riastrad  * This helper creates an sg table object from a set of pages; the driver
     802      1.1  riastrad  * is responsible for mapping the pages into the importer's address space
     803      1.2  riastrad  * for use with dma_buf itself.
    804      1.1  riastrad  */
    805      1.2  riastrad struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
    806      1.1  riastrad {
    807      1.1  riastrad 	struct sg_table *sg = NULL;
    808      1.1  riastrad 	int ret;
    809      1.1  riastrad 
    810      1.1  riastrad 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    811      1.2  riastrad 	if (!sg) {
    812      1.2  riastrad 		ret = -ENOMEM;
    813      1.1  riastrad 		goto out;
    814      1.2  riastrad 	}
    815      1.1  riastrad 
    816      1.2  riastrad 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
    817      1.2  riastrad 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
    818      1.1  riastrad 	if (ret)
    819      1.1  riastrad 		goto out;
    820      1.1  riastrad 
    821      1.1  riastrad 	return sg;
    822      1.1  riastrad out:
    823      1.1  riastrad 	kfree(sg);
    824      1.2  riastrad 	return ERR_PTR(ret);
    825      1.1  riastrad }
    826      1.1  riastrad EXPORT_SYMBOL(drm_prime_pages_to_sg);
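/*
 * Sketch of a typical gem_prime_get_sg_table hook built on this helper
 * (hypothetical "foo" driver; foo_gem_object is assumed to keep a page
 * array for the object):
 *
 *	static struct sg_table *
 *	foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *fobj = to_foo_gem_object(obj);
 *
 *		return drm_prime_pages_to_sg(fobj->pages,
 *		    obj->size >> PAGE_SHIFT);
 *	}
 */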
    827      1.1  riastrad 
    828      1.3  riastrad #ifdef __NetBSD__
    829      1.3  riastrad 
    830      1.3  riastrad struct sg_table *
    831      1.5  riastrad drm_prime_bus_dmamem_to_sg(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
    832      1.5  riastrad     int nsegs)
    833      1.4  riastrad {
    834      1.4  riastrad 	struct sg_table *sg;
    835      1.4  riastrad 	int ret;
    836      1.4  riastrad 
    837      1.4  riastrad 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
    838      1.4  riastrad 	if (sg == NULL) {
    839      1.4  riastrad 		ret = -ENOMEM;
    840      1.4  riastrad 		goto out;
    841      1.4  riastrad 	}
    842      1.4  riastrad 
    843      1.5  riastrad 	ret = sg_alloc_table_from_bus_dmamem(sg, dmat, segs, nsegs,
    844      1.5  riastrad 	    GFP_KERNEL);
    845      1.4  riastrad 	if (ret)
    846      1.4  riastrad 		goto out;
    847      1.4  riastrad 
    848      1.4  riastrad 	return sg;
    849      1.4  riastrad out:
    850      1.4  riastrad 	kfree(sg);
    851      1.4  riastrad 	return ERR_PTR(ret);
    852      1.4  riastrad }
    853      1.4  riastrad 
    854      1.4  riastrad struct sg_table *
    855      1.3  riastrad drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
    856      1.3  riastrad {
    857      1.3  riastrad 	struct sg_table *sg;
    858      1.3  riastrad 	int ret;
    859      1.3  riastrad 
    860      1.3  riastrad 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
    861      1.3  riastrad 	if (sg == NULL) {
    862      1.3  riastrad 		ret = -ENOMEM;
    863      1.3  riastrad 		goto out;
    864      1.3  riastrad 	}
    865      1.3  riastrad 
     866      1.3  riastrad 	ret = sg_alloc_table_from_pglist(sg, pglist, npages, 0,
     867      1.3  riastrad 	    npages << PAGE_SHIFT, GFP_KERNEL);
    868      1.3  riastrad 	if (ret)
    869      1.3  riastrad 		goto out;
    870      1.3  riastrad 
    871      1.3  riastrad 	return sg;
    872      1.3  riastrad 
    873      1.3  riastrad out:
    874      1.3  riastrad 	kfree(sg);
    875      1.3  riastrad 	return ERR_PTR(ret);
    876      1.3  riastrad }
    877      1.3  riastrad 
    878      1.4  riastrad bus_size_t
    879      1.4  riastrad drm_prime_sg_size(struct sg_table *sg)
    880      1.4  riastrad {
    881      1.4  riastrad 
    882      1.5  riastrad 	return sg->sgt_npgs << PAGE_SHIFT;
    883      1.4  riastrad }
    884      1.4  riastrad 
    885      1.3  riastrad void
    886      1.3  riastrad drm_prime_sg_free(struct sg_table *sg)
    887      1.3  riastrad {
    888      1.3  riastrad 
    889      1.3  riastrad 	sg_free_table(sg);
    890      1.3  riastrad 	kfree(sg);
    891      1.3  riastrad }
    892      1.3  riastrad 
    893      1.3  riastrad int
    894      1.5  riastrad drm_prime_sg_to_bus_dmamem(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    895      1.5  riastrad     int nsegs, int *rsegs, const struct sg_table *sgt)
    896      1.3  riastrad {
    897      1.3  riastrad 
    898      1.3  riastrad 	/* XXX errno NetBSD->Linux */
    899      1.5  riastrad 	return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs, sgt->sgt_pgs,
    900      1.5  riastrad 	    sgt->sgt_npgs);
    901      1.3  riastrad }
    902      1.3  riastrad 
    903      1.4  riastrad int
    904      1.5  riastrad drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
    905      1.5  riastrad     struct sg_table *sgt)
    906      1.4  riastrad {
    907      1.5  riastrad 	bus_dma_segment_t *segs;
    908      1.5  riastrad 	bus_size_t size = drm_prime_sg_size(sgt);
    909      1.5  riastrad 	int nsegs = sgt->sgt_npgs;
    910      1.5  riastrad 	int ret;
    911      1.5  riastrad 
    912      1.5  riastrad 	segs = kcalloc(sgt->sgt_npgs, sizeof(segs[0]), GFP_KERNEL);
    913      1.5  riastrad 	if (segs == NULL) {
    914      1.5  riastrad 		ret = -ENOMEM;
    915      1.5  riastrad 		goto out0;
    916      1.5  riastrad 	}
    917      1.5  riastrad 
    918      1.5  riastrad 	ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
    919      1.5  riastrad 	if (ret)
    920      1.5  riastrad 		goto out1;
    921      1.5  riastrad 	KASSERT(nsegs <= sgt->sgt_npgs);
    922      1.5  riastrad 
    923      1.5  riastrad 	/* XXX errno NetBSD->Linux */
    924      1.5  riastrad 	ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
    925      1.5  riastrad 	    BUS_DMA_NOWAIT);
    926      1.5  riastrad 	if (ret)
    927      1.5  riastrad 		goto out1;
    928      1.4  riastrad 
    929      1.5  riastrad out1:	kfree(segs);
    930      1.5  riastrad out0:	return ret;
    931      1.4  riastrad }
    932      1.4  riastrad 
    933      1.7  riastrad bool
    934      1.7  riastrad drm_prime_sg_importable(bus_dma_tag_t dmat, struct sg_table *sgt)
    935      1.7  riastrad {
    936      1.7  riastrad 	unsigned i;
    937      1.7  riastrad 
    938      1.7  riastrad 	for (i = 0; i < sgt->sgt_npgs; i++) {
    939      1.7  riastrad 		if (bus_dmatag_bounces_paddr(dmat, sgt->sgt_pgs[i]))
    940      1.7  riastrad 			return false;
    941      1.7  riastrad 	}
    942      1.7  riastrad 	return true;
    943      1.7  riastrad }
    944      1.7  riastrad 
    945      1.3  riastrad #else  /* !__NetBSD__ */
    946      1.3  riastrad 
    947      1.2  riastrad /**
    948      1.2  riastrad  * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
    949      1.2  riastrad  * @sgt: scatter-gather table to convert
    950      1.2  riastrad  * @pages: array of page pointers to store the page array in
    951      1.2  riastrad  * @addrs: optional array to store the dma bus address of each page
    952      1.2  riastrad  * @max_pages: size of both the passed-in arrays
    953      1.2  riastrad  *
    954      1.2  riastrad  * Exports an sg table into an array of pages and addresses. This is currently
    955      1.2  riastrad  * required by the TTM driver in order to do correct fault handling.
    956      1.2  riastrad  */
    957      1.1  riastrad int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
    958      1.1  riastrad 				     dma_addr_t *addrs, int max_pages)
    959      1.1  riastrad {
    960      1.1  riastrad 	unsigned count;
    961      1.1  riastrad 	struct scatterlist *sg;
    962      1.1  riastrad 	struct page *page;
    963      1.2  riastrad 	u32 len;
    964      1.1  riastrad 	int pg_index;
    965      1.1  riastrad 	dma_addr_t addr;
    966      1.1  riastrad 
    967      1.1  riastrad 	pg_index = 0;
    968      1.1  riastrad 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
    969      1.1  riastrad 		len = sg->length;
    970      1.1  riastrad 		page = sg_page(sg);
    971      1.1  riastrad 		addr = sg_dma_address(sg);
    972      1.1  riastrad 
    973      1.1  riastrad 		while (len > 0) {
    974      1.1  riastrad 			if (WARN_ON(pg_index >= max_pages))
    975      1.1  riastrad 				return -1;
    976      1.1  riastrad 			pages[pg_index] = page;
    977      1.1  riastrad 			if (addrs)
    978      1.1  riastrad 				addrs[pg_index] = addr;
    979      1.1  riastrad 
    980      1.1  riastrad 			page++;
    981      1.1  riastrad 			addr += PAGE_SIZE;
    982      1.1  riastrad 			len -= PAGE_SIZE;
    983      1.1  riastrad 			pg_index++;
    984      1.1  riastrad 		}
    985      1.1  riastrad 	}
    986      1.1  riastrad 	return 0;
    987      1.1  riastrad }
    988      1.1  riastrad EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
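/*
 * Sketch of a typical caller (hypothetical importer): size both arrays
 * by the object size and let this helper fill them:
 *
 *	npages = obj->size >> PAGE_SHIFT;
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
 *	if (ret < 0)
 *		goto fail;
 */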
    989      1.2  riastrad 
    990      1.3  riastrad #endif	/* __NetBSD__ */
    991      1.3  riastrad 
    992      1.2  riastrad /**
    993      1.2  riastrad  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
    994      1.2  riastrad  * @obj: GEM object which was created from a dma-buf
    995      1.2  riastrad  * @sg: the sg-table which was pinned at import time
    996      1.2  riastrad  *
    997      1.2  riastrad  * This is the cleanup functions which GEM drivers need to call when they use
    998      1.2  riastrad  * @drm_gem_prime_import to import dma-bufs.
    999      1.2  riastrad  */
   1000      1.1  riastrad void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
   1001      1.1  riastrad {
   1002      1.1  riastrad 	struct dma_buf_attachment *attach;
   1003      1.1  riastrad 	struct dma_buf *dma_buf;
   1004      1.1  riastrad 	attach = obj->import_attach;
   1005      1.1  riastrad 	if (sg)
   1006      1.1  riastrad 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
   1007      1.1  riastrad 	dma_buf = attach->dmabuf;
   1008      1.1  riastrad 	dma_buf_detach(attach->dmabuf, attach);
   1009      1.1  riastrad 	/* remove the reference */
   1010      1.1  riastrad 	dma_buf_put(dma_buf);
   1011      1.1  riastrad }
   1012      1.1  riastrad EXPORT_SYMBOL(drm_prime_gem_destroy);
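/*
 * Sketch of the intended call site (hypothetical driver): the driver's
 * free-object path hands back the sg table it pinned at import time:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *fobj = to_foo_gem_object(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, fobj->sgt);
 *		...
 *	}
 */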
   1013      1.1  riastrad 
   1014      1.1  riastrad void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
   1015      1.1  riastrad {
   1016      1.1  riastrad 	INIT_LIST_HEAD(&prime_fpriv->head);
   1017      1.1  riastrad 	mutex_init(&prime_fpriv->lock);
   1018      1.1  riastrad }
   1019      1.1  riastrad 
   1020      1.1  riastrad void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
   1021      1.1  riastrad {
   1022      1.2  riastrad 	/* by now drm_gem_release should've made sure the list is empty */
   1023      1.2  riastrad 	WARN_ON(!list_empty(&prime_fpriv->head));
   1024      1.3  riastrad 	mutex_destroy(&prime_fpriv->lock);
   1025      1.1  riastrad }
   1026