      1  1.2  riastrad /*	$NetBSD: drm_prime.c,v 1.4 2018/08/27 15:26:00 riastradh Exp $	*/
      2  1.2  riastrad 
      3  1.1  riastrad /*
      4  1.1  riastrad  * Copyright © 2012 Red Hat
      5  1.1  riastrad  *
      6  1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
      7  1.1  riastrad  * copy of this software and associated documentation files (the "Software"),
      8  1.1  riastrad  * to deal in the Software without restriction, including without limitation
      9  1.1  riastrad  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  1.1  riastrad  * and/or sell copies of the Software, and to permit persons to whom the
     11  1.1  riastrad  * Software is furnished to do so, subject to the following conditions:
     12  1.1  riastrad  *
     13  1.1  riastrad  * The above copyright notice and this permission notice (including the next
     14  1.1  riastrad  * paragraph) shall be included in all copies or substantial portions of the
     15  1.1  riastrad  * Software.
     16  1.1  riastrad  *
     17  1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  1.1  riastrad  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  1.1  riastrad  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  1.1  riastrad  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23  1.1  riastrad  * IN THE SOFTWARE.
     24  1.1  riastrad  *
     25  1.1  riastrad  * Authors:
      26  1.1  riastrad  *      Dave Airlie <airlied@redhat.com>
      27  1.1  riastrad  *      Rob Clark <rob.clark@linaro.org>
     28  1.1  riastrad  *
     29  1.1  riastrad  */
     30  1.1  riastrad 
     31  1.2  riastrad #include <sys/cdefs.h>
     32  1.2  riastrad __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.4 2018/08/27 15:26:00 riastradh Exp $");
     33  1.2  riastrad 
     34  1.1  riastrad #include <linux/export.h>
     35  1.1  riastrad #include <linux/dma-buf.h>
     36  1.1  riastrad #include <drm/drmP.h>
     37  1.2  riastrad #include <drm/drm_gem.h>
     38  1.2  riastrad 
     39  1.2  riastrad #include "drm_internal.h"
     40  1.1  riastrad 
     41  1.4  riastrad #ifdef __NetBSD__
     42  1.4  riastrad 
     43  1.4  riastrad /*
     44  1.4  riastrad  * We use struct sg_table just to pass around an array of
     45  1.4  riastrad  * bus_dma_segment_t from one device to another in drm prime.  Since
     46  1.4  riastrad  * this is _not_ a complete implementation of Linux's sg table
     47  1.4  riastrad  * abstraction (e.g., it does not remember DMA addresses and RAM pages
     48  1.4  riastrad  * separately, and it doesn't support the nested chained iteration of
     49  1.4  riastrad  * Linux scatterlists), we isolate it to this file and make all callers
     50  1.4  riastrad  * go through a few extra subroutines (drm_prime_sg_size,
     51  1.4  riastrad  * drm_prime_sg_free, &c.) to use it.  Don't use this outside drm
     52  1.4  riastrad  * prime!
     53  1.4  riastrad  */
     54  1.4  riastrad 
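/*
 * Illustrative sketch, not part of the original file: how a consumer in
 * drm prime is expected to use one of these sg_tables through the
 * wrapper subroutines defined later in this file rather than touching
 * the members directly.  "dmat", "map", "sgt", and "error" are assumed
 * to belong to the consuming driver.
 */
#if 0
	/* map must have been created for at least drm_prime_sg_size(sgt)
	 * bytes; load the recorded segments into it for DMA. */
	error = drm_prime_bus_dmamap_load_sgt(dmat, map, sgt);
	if (error)		/* already a Linux-style negative errno */
		return error;

	/* ...use the mapping...; then, when done with the import: */
	bus_dmamap_unload(dmat, map);
	drm_prime_sg_free(sgt);
#endif
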
     55  1.3  riastrad struct sg_table {
     56  1.3  riastrad 	bus_dma_segment_t	*sgt_segs;
     57  1.3  riastrad 	int			sgt_nsegs;
     58  1.3  riastrad 	bus_size_t		sgt_size;
     59  1.3  riastrad };
     60  1.3  riastrad 
     61  1.3  riastrad static int
     62  1.3  riastrad sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
     63  1.3  riastrad     unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
     64  1.3  riastrad {
     65  1.3  riastrad 	unsigned i;
     66  1.3  riastrad 
     67  1.3  riastrad 	KASSERT(offset == 0);
     68  1.3  riastrad 	KASSERT(size == npages << PAGE_SHIFT);
     69  1.3  riastrad 
     70  1.3  riastrad 	sgt->sgt_segs = kcalloc(npages, sizeof(sgt->sgt_segs[0]), gfp);
     71  1.3  riastrad 	if (sgt->sgt_segs == NULL)
     72  1.3  riastrad 		return -ENOMEM;
     73  1.3  riastrad 	sgt->sgt_nsegs = npages;
     74  1.3  riastrad 	sgt->sgt_size = size;
     75  1.3  riastrad 
     76  1.3  riastrad 	for (i = 0; i < npages; i++) {
     77  1.3  riastrad 		sgt->sgt_segs[i].ds_addr = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);
     78  1.3  riastrad 		sgt->sgt_segs[i].ds_len = PAGE_SIZE;
     79  1.3  riastrad 	}
     80  1.3  riastrad 
     81  1.3  riastrad 	return 0;
     82  1.3  riastrad }
     83  1.3  riastrad 
     84  1.3  riastrad static int
     85  1.3  riastrad sg_alloc_table_from_pglist(struct sg_table *sgt, struct pglist *pglist,
     86  1.3  riastrad     unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
     87  1.3  riastrad {
     88  1.3  riastrad 	struct vm_page *pg;
     89  1.3  riastrad 	unsigned i;
     90  1.3  riastrad 
     91  1.3  riastrad 	KASSERT(offset == 0);
     92  1.3  riastrad 	KASSERT(size == npages << PAGE_SHIFT);
     93  1.3  riastrad 
     94  1.3  riastrad 	sgt->sgt_segs = kcalloc(npages, sizeof(sgt->sgt_segs[0]), gfp);
     95  1.3  riastrad 	if (sgt->sgt_segs == NULL)
     96  1.3  riastrad 		return -ENOMEM;
     97  1.3  riastrad 	sgt->sgt_nsegs = npages;
     98  1.3  riastrad 	sgt->sgt_size = size;
     99  1.3  riastrad 
    100  1.3  riastrad 	i = 0;
    101  1.3  riastrad 	TAILQ_FOREACH(pg, pglist, pageq.queue) {
    102  1.3  riastrad 		KASSERT(i < npages);
     103  1.3  riastrad 		sgt->sgt_segs[i].ds_addr = VM_PAGE_TO_PHYS(pg);
     104  1.3  riastrad 		sgt->sgt_segs[i].ds_len = PAGE_SIZE;
                        		i++;
     105  1.3  riastrad 	}
    106  1.3  riastrad 	KASSERT(i == npages);
    107  1.3  riastrad 
    108  1.3  riastrad 	return 0;
    109  1.3  riastrad }
    110  1.3  riastrad 
    111  1.4  riastrad static int
    112  1.4  riastrad sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_segment_t *segs,
    113  1.4  riastrad     int nsegs, gfp_t gfp)
    114  1.4  riastrad {
    115  1.4  riastrad 	int seg;
    116  1.4  riastrad 
    117  1.4  riastrad 	KASSERT(nsegs > 0);
    118  1.4  riastrad 	sgt->sgt_segs = kcalloc(nsegs, sizeof(sgt->sgt_segs[0]), gfp);
    119  1.4  riastrad 	if (sgt->sgt_segs == NULL)
    120  1.4  riastrad 		return -ENOMEM;
    121  1.4  riastrad 	sgt->sgt_nsegs = nsegs;
    122  1.4  riastrad 	sgt->sgt_size = 0;
    123  1.4  riastrad 
    124  1.4  riastrad 	for (seg = 0; seg < nsegs; seg++) {
    125  1.4  riastrad 		sgt->sgt_segs[seg].ds_addr = segs[seg].ds_addr;
    126  1.4  riastrad 		sgt->sgt_segs[seg].ds_len = segs[seg].ds_len;
    127  1.4  riastrad 		KASSERT(segs[seg].ds_len <= __type_max(bus_size_t) -
    128  1.4  riastrad 		    sgt->sgt_size);
    129  1.4  riastrad 		sgt->sgt_size += segs[seg].ds_len;
    130  1.4  riastrad 	}
    131  1.4  riastrad 
    132  1.4  riastrad 	return 0;
    133  1.4  riastrad }
    134  1.4  riastrad 
    135  1.3  riastrad static void
    136  1.3  riastrad sg_free_table(struct sg_table *sgt)
    137  1.3  riastrad {
    138  1.3  riastrad 
    139  1.3  riastrad 	kfree(sgt->sgt_segs);
    140  1.3  riastrad 	sgt->sgt_segs = NULL;
    141  1.3  riastrad 	sgt->sgt_nsegs = 0;
    142  1.3  riastrad 	sgt->sgt_size = 0;
    143  1.3  riastrad }
    144  1.3  riastrad 
    145  1.4  riastrad #endif	/* __NetBSD__ */
    146  1.4  riastrad 
    147  1.1  riastrad /*
    148  1.1  riastrad  * DMA-BUF/GEM Object references and lifetime overview:
    149  1.1  riastrad  *
    150  1.1  riastrad  * On the export the dma_buf holds a reference to the exporting GEM
    151  1.1  riastrad  * object. It takes this reference in handle_to_fd_ioctl, when it
    152  1.1  riastrad  * first calls .prime_export and stores the exporting GEM object in
    153  1.1  riastrad  * the dma_buf priv. This reference is released when the dma_buf
    154  1.1  riastrad  * object goes away in the driver .release function.
    155  1.1  riastrad  *
    156  1.1  riastrad  * On the import the importing GEM object holds a reference to the
    157  1.1  riastrad  * dma_buf (which in turn holds a ref to the exporting GEM object).
    158  1.1  riastrad  * It takes that reference in the fd_to_handle ioctl.
    159  1.1  riastrad  * It calls dma_buf_get, creates an attachment to it and stores the
    160  1.1  riastrad  * attachment in the GEM object. When this attachment is destroyed
    161  1.1  riastrad  * when the imported object is destroyed, we remove the attachment
    162  1.1  riastrad  * and drop the reference to the dma_buf.
    163  1.1  riastrad  *
    164  1.1  riastrad  * Thus the chain of references always flows in one direction
    165  1.1  riastrad  * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
    166  1.1  riastrad  *
    167  1.1  riastrad  * Self-importing: if userspace is using PRIME as a replacement for flink
    168  1.1  riastrad  * then it will get a fd->handle request for a GEM object that it created.
    169  1.1  riastrad  * Drivers should detect this situation and return back the gem object
    170  1.2  riastrad  * from the dma-buf private.  Prime will do this automatically for drivers that
    171  1.2  riastrad  * use the drm_gem_prime_{import,export} helpers.
    172  1.1  riastrad  */
    173  1.1  riastrad 
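/*
 * Illustrative sketch, not part of the original file: the userspace side
 * of the export/import flow described above, in terms of the PRIME
 * ioctls dispatched at the bottom of this file.  Error handling omitted;
 * exporter_fd, importer_fd, and handle are assumed.
 */
#if 0
	struct drm_prime_handle export_args = {
		.handle = handle,	/* GEM handle on exporter_fd */
		.flags = DRM_CLOEXEC,
	};
	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &export_args);
	/* export_args.fd now holds a reference on the exporting GEM object. */

	struct drm_prime_handle import_args = { .fd = export_args.fd };
	ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import_args);
	/* import_args.handle: importing GEM object -> dma-buf -> exporting GEM object. */

	close(export_args.fd);	/* the GEM handles keep their own references */
#endif
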
    174  1.1  riastrad struct drm_prime_member {
    175  1.1  riastrad 	struct list_head entry;
    176  1.1  riastrad 	struct dma_buf *dma_buf;
    177  1.1  riastrad 	uint32_t handle;
    178  1.1  riastrad };
    179  1.1  riastrad 
    180  1.2  riastrad struct drm_prime_attachment {
    181  1.2  riastrad 	struct sg_table *sgt;
    182  1.2  riastrad 	enum dma_data_direction dir;
    183  1.2  riastrad };
    184  1.2  riastrad 
    185  1.2  riastrad static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
    186  1.2  riastrad 				    struct dma_buf *dma_buf, uint32_t handle)
    187  1.2  riastrad {
    188  1.2  riastrad 	struct drm_prime_member *member;
    189  1.2  riastrad 
    190  1.2  riastrad 	member = kmalloc(sizeof(*member), GFP_KERNEL);
    191  1.2  riastrad 	if (!member)
    192  1.2  riastrad 		return -ENOMEM;
    193  1.2  riastrad 
    194  1.2  riastrad 	get_dma_buf(dma_buf);
    195  1.2  riastrad 	member->dma_buf = dma_buf;
    196  1.2  riastrad 	member->handle = handle;
    197  1.2  riastrad 	list_add(&member->entry, &prime_fpriv->head);
    198  1.2  riastrad 	return 0;
    199  1.2  riastrad }
    200  1.2  riastrad 
    201  1.2  riastrad static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
    202  1.2  riastrad 						      uint32_t handle)
    203  1.2  riastrad {
    204  1.2  riastrad 	struct drm_prime_member *member;
    205  1.2  riastrad 
    206  1.2  riastrad 	list_for_each_entry(member, &prime_fpriv->head, entry) {
    207  1.2  riastrad 		if (member->handle == handle)
    208  1.2  riastrad 			return member->dma_buf;
    209  1.2  riastrad 	}
    210  1.2  riastrad 
    211  1.2  riastrad 	return NULL;
    212  1.2  riastrad }
    213  1.2  riastrad 
    214  1.2  riastrad static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
    215  1.2  riastrad 				       struct dma_buf *dma_buf,
    216  1.2  riastrad 				       uint32_t *handle)
    217  1.2  riastrad {
    218  1.2  riastrad 	struct drm_prime_member *member;
    219  1.2  riastrad 
    220  1.2  riastrad 	list_for_each_entry(member, &prime_fpriv->head, entry) {
    221  1.2  riastrad 		if (member->dma_buf == dma_buf) {
    222  1.2  riastrad 			*handle = member->handle;
    223  1.2  riastrad 			return 0;
    224  1.2  riastrad 		}
    225  1.2  riastrad 	}
    226  1.2  riastrad 	return -ENOENT;
    227  1.2  riastrad }
    228  1.2  riastrad 
    229  1.2  riastrad static int drm_gem_map_attach(struct dma_buf *dma_buf,
    230  1.2  riastrad 			      struct device *target_dev,
    231  1.2  riastrad 			      struct dma_buf_attachment *attach)
    232  1.2  riastrad {
    233  1.2  riastrad 	struct drm_prime_attachment *prime_attach;
    234  1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    235  1.2  riastrad 	struct drm_device *dev = obj->dev;
    236  1.2  riastrad 
    237  1.2  riastrad 	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
    238  1.2  riastrad 	if (!prime_attach)
    239  1.2  riastrad 		return -ENOMEM;
    240  1.2  riastrad 
    241  1.2  riastrad 	prime_attach->dir = DMA_NONE;
    242  1.2  riastrad 	attach->priv = prime_attach;
    243  1.2  riastrad 
    244  1.2  riastrad 	if (!dev->driver->gem_prime_pin)
    245  1.2  riastrad 		return 0;
    246  1.2  riastrad 
    247  1.2  riastrad 	return dev->driver->gem_prime_pin(obj);
    248  1.2  riastrad }
    249  1.2  riastrad 
    250  1.2  riastrad static void drm_gem_map_detach(struct dma_buf *dma_buf,
    251  1.2  riastrad 			       struct dma_buf_attachment *attach)
    252  1.2  riastrad {
    253  1.2  riastrad 	struct drm_prime_attachment *prime_attach = attach->priv;
    254  1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    255  1.2  riastrad 	struct drm_device *dev = obj->dev;
    256  1.2  riastrad 	struct sg_table *sgt;
    257  1.2  riastrad 
    258  1.2  riastrad 	if (dev->driver->gem_prime_unpin)
    259  1.2  riastrad 		dev->driver->gem_prime_unpin(obj);
    260  1.2  riastrad 
    261  1.2  riastrad 	if (!prime_attach)
    262  1.2  riastrad 		return;
    263  1.2  riastrad 
    264  1.2  riastrad 	sgt = prime_attach->sgt;
    265  1.2  riastrad 	if (sgt) {
    266  1.3  riastrad #ifndef __NetBSD__		/* We map/unmap elsewhere.  */
    267  1.2  riastrad 		if (prime_attach->dir != DMA_NONE)
    268  1.2  riastrad 			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
    269  1.2  riastrad 					prime_attach->dir);
    270  1.3  riastrad #endif
    271  1.2  riastrad 		sg_free_table(sgt);
    272  1.2  riastrad 	}
    273  1.2  riastrad 
    274  1.2  riastrad 	kfree(sgt);
    275  1.2  riastrad 	kfree(prime_attach);
    276  1.2  riastrad 	attach->priv = NULL;
    277  1.2  riastrad }
    278  1.2  riastrad 
    279  1.2  riastrad void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
    280  1.2  riastrad 					struct dma_buf *dma_buf)
    281  1.2  riastrad {
    282  1.2  riastrad 	struct drm_prime_member *member, *safe;
    283  1.2  riastrad 
    284  1.2  riastrad 	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
    285  1.2  riastrad 		if (member->dma_buf == dma_buf) {
    286  1.2  riastrad 			dma_buf_put(dma_buf);
    287  1.2  riastrad 			list_del(&member->entry);
    288  1.2  riastrad 			kfree(member);
    289  1.2  riastrad 		}
    290  1.2  riastrad 	}
    291  1.2  riastrad }
    292  1.2  riastrad 
    293  1.2  riastrad static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
    294  1.3  riastrad 						enum dma_data_direction dir)
    295  1.2  riastrad {
    296  1.2  riastrad 	struct drm_prime_attachment *prime_attach = attach->priv;
    297  1.2  riastrad 	struct drm_gem_object *obj = attach->dmabuf->priv;
    298  1.2  riastrad 	struct sg_table *sgt;
    299  1.2  riastrad 
    300  1.2  riastrad 	if (WARN_ON(dir == DMA_NONE || !prime_attach))
    301  1.2  riastrad 		return ERR_PTR(-EINVAL);
    302  1.2  riastrad 
    303  1.2  riastrad 	/* return the cached mapping when possible */
    304  1.2  riastrad 	if (prime_attach->dir == dir)
    305  1.2  riastrad 		return prime_attach->sgt;
    306  1.2  riastrad 
    307  1.2  riastrad 	/*
    308  1.2  riastrad 	 * two mappings with different directions for the same attachment are
    309  1.2  riastrad 	 * not allowed
    310  1.2  riastrad 	 */
    311  1.2  riastrad 	if (WARN_ON(prime_attach->dir != DMA_NONE))
    312  1.2  riastrad 		return ERR_PTR(-EBUSY);
    313  1.2  riastrad 
    314  1.2  riastrad 	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
    315  1.2  riastrad 	if (!IS_ERR(sgt)) {
    316  1.3  riastrad #ifdef __NetBSD__		/* We map/unmap elsewhere.  */
    317  1.3  riastrad 		prime_attach->sgt = sgt;
    318  1.3  riastrad 		prime_attach->dir = dir;
    319  1.3  riastrad #else
    320  1.2  riastrad 		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
    321  1.2  riastrad 			sg_free_table(sgt);
    322  1.2  riastrad 			kfree(sgt);
    323  1.2  riastrad 			sgt = ERR_PTR(-ENOMEM);
    324  1.2  riastrad 		} else {
    325  1.2  riastrad 			prime_attach->sgt = sgt;
    326  1.2  riastrad 			prime_attach->dir = dir;
    327  1.2  riastrad 		}
    328  1.3  riastrad #endif
    329  1.2  riastrad 	}
    330  1.2  riastrad 
    331  1.2  riastrad 	return sgt;
    332  1.2  riastrad }
    333  1.2  riastrad 
    334  1.2  riastrad static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
    335  1.2  riastrad 				  struct sg_table *sgt,
    336  1.2  riastrad 				  enum dma_data_direction dir)
    337  1.2  riastrad {
    338  1.2  riastrad 	/* nothing to be done here */
    339  1.2  riastrad }
    340  1.2  riastrad 
    341  1.2  riastrad /**
    342  1.2  riastrad  * drm_gem_dmabuf_release - dma_buf release implementation for GEM
    343  1.2  riastrad  * @dma_buf: buffer to be released
    344  1.2  riastrad  *
    345  1.2  riastrad  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
    346  1.2  riastrad  * must use this in their dma_buf ops structure as the release callback.
    347  1.2  riastrad  */
    348  1.2  riastrad void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
    349  1.2  riastrad {
    350  1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    351  1.2  riastrad 
     352  1.2  riastrad 	/* drop the reference the export fd holds */
    353  1.2  riastrad 	drm_gem_object_unreference_unlocked(obj);
    354  1.2  riastrad }
    355  1.2  riastrad EXPORT_SYMBOL(drm_gem_dmabuf_release);
    356  1.2  riastrad 
    357  1.2  riastrad static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
    358  1.2  riastrad {
    359  1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    360  1.2  riastrad 	struct drm_device *dev = obj->dev;
    361  1.2  riastrad 
    362  1.2  riastrad 	return dev->driver->gem_prime_vmap(obj);
    363  1.2  riastrad }
    364  1.2  riastrad 
    365  1.2  riastrad static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
    366  1.2  riastrad {
    367  1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    368  1.2  riastrad 	struct drm_device *dev = obj->dev;
    369  1.2  riastrad 
    370  1.2  riastrad 	dev->driver->gem_prime_vunmap(obj, vaddr);
    371  1.2  riastrad }
    372  1.2  riastrad 
    373  1.2  riastrad static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
    374  1.2  riastrad 					unsigned long page_num)
    375  1.2  riastrad {
    376  1.2  riastrad 	return NULL;
    377  1.2  riastrad }
    378  1.2  riastrad 
    379  1.2  riastrad static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
    380  1.2  riastrad 					 unsigned long page_num, void *addr)
    381  1.2  riastrad {
    382  1.2  riastrad 
    383  1.2  riastrad }
    384  1.2  riastrad static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
    385  1.2  riastrad 				 unsigned long page_num)
    386  1.2  riastrad {
    387  1.2  riastrad 	return NULL;
    388  1.2  riastrad }
    389  1.2  riastrad 
    390  1.2  riastrad static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
    391  1.2  riastrad 				  unsigned long page_num, void *addr)
    392  1.2  riastrad {
    393  1.2  riastrad 
    394  1.2  riastrad }
    395  1.2  riastrad 
    396  1.3  riastrad #ifdef __NetBSD__
    397  1.3  riastrad static int
    398  1.3  riastrad drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp, size_t size,
    399  1.3  riastrad     int prot, int *flagsp, int *advicep, struct uvm_object **uobjp,
    400  1.3  riastrad     int *maxprotp)
    401  1.3  riastrad #else
    402  1.2  riastrad static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
    403  1.2  riastrad 			       struct vm_area_struct *vma)
    404  1.3  riastrad #endif
    405  1.2  riastrad {
    406  1.2  riastrad 	struct drm_gem_object *obj = dma_buf->priv;
    407  1.2  riastrad 	struct drm_device *dev = obj->dev;
    408  1.2  riastrad 
    409  1.2  riastrad 	if (!dev->driver->gem_prime_mmap)
    410  1.2  riastrad 		return -ENOSYS;
    411  1.2  riastrad 
    412  1.3  riastrad #ifdef __NetBSD__
    413  1.3  riastrad 	return dev->driver->gem_prime_mmap(obj, offp, size, prot, flagsp,
    414  1.3  riastrad 	    advicep, uobjp, maxprotp);
    415  1.3  riastrad #else
    416  1.2  riastrad 	return dev->driver->gem_prime_mmap(obj, vma);
    417  1.3  riastrad #endif
    418  1.2  riastrad }
    419  1.2  riastrad 
    420  1.2  riastrad static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
    421  1.2  riastrad 	.attach = drm_gem_map_attach,
    422  1.2  riastrad 	.detach = drm_gem_map_detach,
    423  1.2  riastrad 	.map_dma_buf = drm_gem_map_dma_buf,
    424  1.2  riastrad 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
    425  1.2  riastrad 	.release = drm_gem_dmabuf_release,
    426  1.2  riastrad 	.kmap = drm_gem_dmabuf_kmap,
    427  1.2  riastrad 	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
    428  1.2  riastrad 	.kunmap = drm_gem_dmabuf_kunmap,
    429  1.2  riastrad 	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
    430  1.2  riastrad 	.mmap = drm_gem_dmabuf_mmap,
    431  1.2  riastrad 	.vmap = drm_gem_dmabuf_vmap,
    432  1.2  riastrad 	.vunmap = drm_gem_dmabuf_vunmap,
    433  1.2  riastrad };
    434  1.2  riastrad 
    435  1.2  riastrad /**
    436  1.2  riastrad  * DOC: PRIME Helpers
    437  1.2  riastrad  *
    438  1.2  riastrad  * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
    439  1.2  riastrad  * simpler APIs by using the helper functions @drm_gem_prime_export and
    440  1.2  riastrad  * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
    441  1.2  riastrad  * six lower-level driver callbacks:
    442  1.2  riastrad  *
    443  1.2  riastrad  * Export callbacks:
    444  1.2  riastrad  *
    445  1.2  riastrad  *  - @gem_prime_pin (optional): prepare a GEM object for exporting
    446  1.2  riastrad  *
    447  1.2  riastrad  *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
    448  1.2  riastrad  *
    449  1.2  riastrad  *  - @gem_prime_vmap: vmap a buffer exported by your driver
    450  1.2  riastrad  *
    451  1.2  riastrad  *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
    452  1.2  riastrad  *
    453  1.2  riastrad  *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
    454  1.2  riastrad  *
    455  1.2  riastrad  * Import callback:
    456  1.2  riastrad  *
    457  1.2  riastrad  *  - @gem_prime_import_sg_table (import): produce a GEM object from another
    458  1.2  riastrad  *    driver's scatter/gather table
    459  1.2  riastrad  */
    460  1.2  riastrad 
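/*
 * Illustrative sketch, not part of the original file: roughly how a
 * driver built on these helpers wires them into its struct drm_driver.
 * The example_* callbacks are hypothetical driver functions.
 */
#if 0
static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = example_gem_prime_pin,			/* optional */
	.gem_prime_get_sg_table = example_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = example_gem_prime_import_sg_table,
	.gem_prime_vmap = example_gem_prime_vmap,
	.gem_prime_vunmap = example_gem_prime_vunmap,
	.gem_prime_mmap = example_gem_prime_mmap,		/* optional */
};
#endif
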
    461  1.2  riastrad /**
    462  1.2  riastrad  * drm_gem_prime_export - helper library implementation of the export callback
    463  1.2  riastrad  * @dev: drm_device to export from
    464  1.2  riastrad  * @obj: GEM object to export
    465  1.2  riastrad  * @flags: flags like DRM_CLOEXEC
    466  1.2  riastrad  *
    467  1.2  riastrad  * This is the implementation of the gem_prime_export functions for GEM drivers
    468  1.2  riastrad  * using the PRIME helpers.
    469  1.2  riastrad  */
    470  1.2  riastrad struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
    471  1.2  riastrad 				     struct drm_gem_object *obj,
    472  1.2  riastrad 				     int flags)
    473  1.2  riastrad {
    474  1.2  riastrad 	struct dma_buf_export_info exp_info = {
    475  1.3  riastrad #ifndef __NetBSD__
    476  1.2  riastrad 		.exp_name = KBUILD_MODNAME, /* white lie for debug */
    477  1.2  riastrad 		.owner = dev->driver->fops->owner,
    478  1.3  riastrad #endif
    479  1.2  riastrad 		.ops = &drm_gem_prime_dmabuf_ops,
    480  1.2  riastrad 		.size = obj->size,
    481  1.2  riastrad 		.flags = flags,
    482  1.2  riastrad 		.priv = obj,
    483  1.2  riastrad 	};
    484  1.2  riastrad 
    485  1.2  riastrad 	if (dev->driver->gem_prime_res_obj)
    486  1.2  riastrad 		exp_info.resv = dev->driver->gem_prime_res_obj(obj);
    487  1.2  riastrad 
    488  1.2  riastrad 	return dma_buf_export(&exp_info);
    489  1.2  riastrad }
    490  1.2  riastrad EXPORT_SYMBOL(drm_gem_prime_export);
    491  1.2  riastrad 
    492  1.2  riastrad static struct dma_buf *export_and_register_object(struct drm_device *dev,
    493  1.2  riastrad 						  struct drm_gem_object *obj,
    494  1.2  riastrad 						  uint32_t flags)
    495  1.2  riastrad {
    496  1.2  riastrad 	struct dma_buf *dmabuf;
    497  1.2  riastrad 
    498  1.2  riastrad 	/* prevent races with concurrent gem_close. */
    499  1.2  riastrad 	if (obj->handle_count == 0) {
    500  1.2  riastrad 		dmabuf = ERR_PTR(-ENOENT);
    501  1.2  riastrad 		return dmabuf;
    502  1.2  riastrad 	}
    503  1.2  riastrad 
    504  1.2  riastrad 	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
    505  1.2  riastrad 	if (IS_ERR(dmabuf)) {
    506  1.2  riastrad 		/* normally the created dma-buf takes ownership of the ref,
    507  1.2  riastrad 		 * but if that fails then drop the ref
    508  1.2  riastrad 		 */
    509  1.2  riastrad 		return dmabuf;
    510  1.2  riastrad 	}
    511  1.2  riastrad 
    512  1.2  riastrad 	/*
    513  1.2  riastrad 	 * Note that callers do not need to clean up the export cache
    514  1.2  riastrad 	 * since the check for obj->handle_count guarantees that someone
    515  1.2  riastrad 	 * will clean it up.
    516  1.2  riastrad 	 */
    517  1.2  riastrad 	obj->dma_buf = dmabuf;
    518  1.2  riastrad 	get_dma_buf(obj->dma_buf);
     519  1.2  riastrad 	/* Grab a new ref since the caller's ref is now used by the dma-buf */
    520  1.2  riastrad 	drm_gem_object_reference(obj);
    521  1.2  riastrad 
    522  1.2  riastrad 	return dmabuf;
    523  1.2  riastrad }
    524  1.2  riastrad 
    525  1.2  riastrad /**
    526  1.2  riastrad  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
    527  1.2  riastrad  * @dev: dev to export the buffer from
    528  1.2  riastrad  * @file_priv: drm file-private structure
    529  1.2  riastrad  * @handle: buffer handle to export
    530  1.2  riastrad  * @flags: flags like DRM_CLOEXEC
     531  1.2  riastrad  * @prime_fd: pointer to storage for the fd id of the created dma-buf
     532  1.2  riastrad  *
     533  1.2  riastrad  * This is the PRIME export function which GEM drivers must use to ensure
     534  1.2  riastrad  * correct lifetime management of the underlying GEM object.
    535  1.2  riastrad  * The actual exporting from GEM object to a dma-buf is done through the
    536  1.2  riastrad  * gem_prime_export driver callback.
    537  1.2  riastrad  */
    538  1.1  riastrad int drm_gem_prime_handle_to_fd(struct drm_device *dev,
    539  1.2  riastrad 			       struct drm_file *file_priv, uint32_t handle,
    540  1.2  riastrad 			       uint32_t flags,
    541  1.2  riastrad 			       int *prime_fd)
    542  1.1  riastrad {
    543  1.1  riastrad 	struct drm_gem_object *obj;
    544  1.2  riastrad 	int ret = 0;
    545  1.2  riastrad 	struct dma_buf *dmabuf;
    546  1.1  riastrad 
    547  1.2  riastrad 	mutex_lock(&file_priv->prime.lock);
    548  1.1  riastrad 	obj = drm_gem_object_lookup(dev, file_priv, handle);
    549  1.2  riastrad 	if (!obj)  {
    550  1.2  riastrad 		ret = -ENOENT;
    551  1.2  riastrad 		goto out_unlock;
    552  1.2  riastrad 	}
    553  1.2  riastrad 
    554  1.2  riastrad 	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
    555  1.2  riastrad 	if (dmabuf) {
    556  1.2  riastrad 		get_dma_buf(dmabuf);
    557  1.2  riastrad 		goto out_have_handle;
    558  1.2  riastrad 	}
    559  1.1  riastrad 
    560  1.2  riastrad 	mutex_lock(&dev->object_name_lock);
    561  1.1  riastrad 	/* re-export the original imported object */
    562  1.1  riastrad 	if (obj->import_attach) {
    563  1.2  riastrad 		dmabuf = obj->import_attach->dmabuf;
    564  1.2  riastrad 		get_dma_buf(dmabuf);
    565  1.2  riastrad 		goto out_have_obj;
    566  1.2  riastrad 	}
    567  1.2  riastrad 
    568  1.2  riastrad 	if (obj->dma_buf) {
    569  1.2  riastrad 		get_dma_buf(obj->dma_buf);
    570  1.2  riastrad 		dmabuf = obj->dma_buf;
    571  1.2  riastrad 		goto out_have_obj;
    572  1.2  riastrad 	}
    573  1.2  riastrad 
    574  1.2  riastrad 	dmabuf = export_and_register_object(dev, obj, flags);
    575  1.2  riastrad 	if (IS_ERR(dmabuf)) {
    576  1.2  riastrad 		/* normally the created dma-buf takes ownership of the ref,
    577  1.2  riastrad 		 * but if that fails then drop the ref
    578  1.2  riastrad 		 */
    579  1.2  riastrad 		ret = PTR_ERR(dmabuf);
    580  1.2  riastrad 		mutex_unlock(&dev->object_name_lock);
    581  1.2  riastrad 		goto out;
    582  1.1  riastrad 	}
    583  1.1  riastrad 
    584  1.2  riastrad out_have_obj:
    585  1.2  riastrad 	/*
    586  1.2  riastrad 	 * If we've exported this buffer then cheat and add it to the import list
    587  1.2  riastrad 	 * so we get the correct handle back. We must do this under the
    588  1.2  riastrad 	 * protection of dev->object_name_lock to ensure that a racing gem close
     589  1.2  riastrad 	 * ioctl doesn't fail to remove this buffer handle from the cache.
    590  1.2  riastrad 	 */
    591  1.2  riastrad 	ret = drm_prime_add_buf_handle(&file_priv->prime,
    592  1.2  riastrad 				       dmabuf, handle);
    593  1.2  riastrad 	mutex_unlock(&dev->object_name_lock);
    594  1.2  riastrad 	if (ret)
    595  1.2  riastrad 		goto fail_put_dmabuf;
    596  1.2  riastrad 
    597  1.2  riastrad out_have_handle:
    598  1.2  riastrad 	ret = dma_buf_fd(dmabuf, flags);
    599  1.2  riastrad 	/*
    600  1.2  riastrad 	 * We must _not_ remove the buffer from the handle cache since the newly
    601  1.2  riastrad 	 * created dma buf is already linked in the global obj->dma_buf pointer,
    602  1.2  riastrad 	 * and that is invariant as long as a userspace gem handle exists.
    603  1.2  riastrad 	 * Closing the handle will clean out the cache anyway, so we don't leak.
    604  1.2  riastrad 	 */
    605  1.2  riastrad 	if (ret < 0) {
    606  1.2  riastrad 		goto fail_put_dmabuf;
    607  1.1  riastrad 	} else {
    608  1.2  riastrad 		*prime_fd = ret;
    609  1.2  riastrad 		ret = 0;
    610  1.2  riastrad 	}
    611  1.2  riastrad 
    612  1.2  riastrad 	goto out;
    613  1.2  riastrad 
    614  1.2  riastrad fail_put_dmabuf:
    615  1.2  riastrad 	dma_buf_put(dmabuf);
    616  1.2  riastrad out:
    617  1.2  riastrad 	drm_gem_object_unreference_unlocked(obj);
    618  1.2  riastrad out_unlock:
    619  1.2  riastrad 	mutex_unlock(&file_priv->prime.lock);
    620  1.2  riastrad 
    621  1.2  riastrad 	return ret;
    622  1.2  riastrad }
    623  1.2  riastrad EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
    624  1.2  riastrad 
    625  1.2  riastrad /**
    626  1.2  riastrad  * drm_gem_prime_import - helper library implementation of the import callback
    627  1.2  riastrad  * @dev: drm_device to import into
    628  1.2  riastrad  * @dma_buf: dma-buf object to import
    629  1.2  riastrad  *
    630  1.2  riastrad  * This is the implementation of the gem_prime_import functions for GEM drivers
    631  1.2  riastrad  * using the PRIME helpers.
    632  1.2  riastrad  */
    633  1.2  riastrad struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
    634  1.2  riastrad 					    struct dma_buf *dma_buf)
    635  1.2  riastrad {
    636  1.2  riastrad 	struct dma_buf_attachment *attach;
    637  1.2  riastrad 	struct sg_table *sgt;
    638  1.2  riastrad 	struct drm_gem_object *obj;
    639  1.2  riastrad 	int ret;
    640  1.2  riastrad 
    641  1.2  riastrad 	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
    642  1.2  riastrad 		obj = dma_buf->priv;
    643  1.2  riastrad 		if (obj->dev == dev) {
    644  1.2  riastrad 			/*
     645  1.2  riastrad 			 * Importing a dmabuf exported from our own gem increases
    646  1.2  riastrad 			 * refcount on gem itself instead of f_count of dmabuf.
    647  1.1  riastrad 			 */
    648  1.2  riastrad 			drm_gem_object_reference(obj);
    649  1.2  riastrad 			return obj;
    650  1.1  riastrad 		}
    651  1.1  riastrad 	}
    652  1.2  riastrad 
    653  1.2  riastrad 	if (!dev->driver->gem_prime_import_sg_table)
    654  1.2  riastrad 		return ERR_PTR(-EINVAL);
    655  1.2  riastrad 
    656  1.2  riastrad 	attach = dma_buf_attach(dma_buf, dev->dev);
    657  1.2  riastrad 	if (IS_ERR(attach))
    658  1.2  riastrad 		return ERR_CAST(attach);
    659  1.2  riastrad 
    660  1.2  riastrad 	get_dma_buf(dma_buf);
    661  1.2  riastrad 
    662  1.2  riastrad 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    663  1.2  riastrad 	if (IS_ERR(sgt)) {
    664  1.2  riastrad 		ret = PTR_ERR(sgt);
    665  1.2  riastrad 		goto fail_detach;
    666  1.2  riastrad 	}
    667  1.2  riastrad 
    668  1.2  riastrad 	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
    669  1.2  riastrad 	if (IS_ERR(obj)) {
    670  1.2  riastrad 		ret = PTR_ERR(obj);
    671  1.2  riastrad 		goto fail_unmap;
    672  1.1  riastrad 	}
    673  1.1  riastrad 
    674  1.2  riastrad 	obj->import_attach = attach;
    675  1.2  riastrad 
    676  1.2  riastrad 	return obj;
    677  1.2  riastrad 
    678  1.2  riastrad fail_unmap:
    679  1.2  riastrad 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
    680  1.2  riastrad fail_detach:
    681  1.2  riastrad 	dma_buf_detach(dma_buf, attach);
    682  1.2  riastrad 	dma_buf_put(dma_buf);
    683  1.2  riastrad 
    684  1.2  riastrad 	return ERR_PTR(ret);
    685  1.1  riastrad }
    686  1.2  riastrad EXPORT_SYMBOL(drm_gem_prime_import);
    687  1.1  riastrad 
    688  1.2  riastrad /**
    689  1.2  riastrad  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
     690  1.2  riastrad  * @dev: dev to import the buffer into
    691  1.2  riastrad  * @file_priv: drm file-private structure
    692  1.2  riastrad  * @prime_fd: fd id of the dma-buf which should be imported
    693  1.2  riastrad  * @handle: pointer to storage for the handle of the imported buffer object
    694  1.2  riastrad  *
     695  1.2  riastrad  * This is the PRIME import function which GEM drivers must use to ensure
     696  1.2  riastrad  * correct lifetime management of the underlying GEM object.  The actual
     697  1.2  riastrad  * importing of the GEM object from the dma-buf is done through the
     698  1.2  riastrad  * gem_prime_import driver callback.
    699  1.2  riastrad  */
    700  1.1  riastrad int drm_gem_prime_fd_to_handle(struct drm_device *dev,
    701  1.2  riastrad 			       struct drm_file *file_priv, int prime_fd,
    702  1.2  riastrad 			       uint32_t *handle)
    703  1.1  riastrad {
    704  1.1  riastrad 	struct dma_buf *dma_buf;
    705  1.1  riastrad 	struct drm_gem_object *obj;
    706  1.1  riastrad 	int ret;
    707  1.1  riastrad 
    708  1.1  riastrad 	dma_buf = dma_buf_get(prime_fd);
    709  1.1  riastrad 	if (IS_ERR(dma_buf))
    710  1.1  riastrad 		return PTR_ERR(dma_buf);
    711  1.1  riastrad 
    712  1.1  riastrad 	mutex_lock(&file_priv->prime.lock);
    713  1.1  riastrad 
    714  1.2  riastrad 	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
    715  1.1  riastrad 			dma_buf, handle);
    716  1.2  riastrad 	if (ret == 0)
    717  1.1  riastrad 		goto out_put;
    718  1.1  riastrad 
    719  1.1  riastrad 	/* never seen this one, need to import */
    720  1.2  riastrad 	mutex_lock(&dev->object_name_lock);
    721  1.1  riastrad 	obj = dev->driver->gem_prime_import(dev, dma_buf);
    722  1.1  riastrad 	if (IS_ERR(obj)) {
    723  1.1  riastrad 		ret = PTR_ERR(obj);
    724  1.2  riastrad 		goto out_unlock;
    725  1.2  riastrad 	}
    726  1.2  riastrad 
    727  1.2  riastrad 	if (obj->dma_buf) {
    728  1.2  riastrad 		WARN_ON(obj->dma_buf != dma_buf);
    729  1.2  riastrad 	} else {
    730  1.2  riastrad 		obj->dma_buf = dma_buf;
    731  1.2  riastrad 		get_dma_buf(dma_buf);
    732  1.1  riastrad 	}
    733  1.1  riastrad 
    734  1.2  riastrad 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
    735  1.2  riastrad 	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
    736  1.1  riastrad 	drm_gem_object_unreference_unlocked(obj);
    737  1.1  riastrad 	if (ret)
    738  1.1  riastrad 		goto out_put;
    739  1.1  riastrad 
    740  1.2  riastrad 	ret = drm_prime_add_buf_handle(&file_priv->prime,
    741  1.1  riastrad 			dma_buf, *handle);
    742  1.1  riastrad 	if (ret)
    743  1.1  riastrad 		goto fail;
    744  1.1  riastrad 
    745  1.1  riastrad 	mutex_unlock(&file_priv->prime.lock);
    746  1.2  riastrad 
    747  1.2  riastrad 	dma_buf_put(dma_buf);
    748  1.2  riastrad 
    749  1.1  riastrad 	return 0;
    750  1.1  riastrad 
    751  1.1  riastrad fail:
    752  1.1  riastrad 	/* hmm, if driver attached, we are relying on the free-object path
    753  1.1  riastrad 	 * to detach.. which seems ok..
    754  1.1  riastrad 	 */
    755  1.2  riastrad 	drm_gem_handle_delete(file_priv, *handle);
    756  1.2  riastrad out_unlock:
    757  1.2  riastrad 	mutex_unlock(&dev->object_name_lock);
    758  1.1  riastrad out_put:
    759  1.1  riastrad 	dma_buf_put(dma_buf);
    760  1.1  riastrad 	mutex_unlock(&file_priv->prime.lock);
    761  1.1  riastrad 	return ret;
    762  1.1  riastrad }
    763  1.1  riastrad EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
    764  1.1  riastrad 
    765  1.1  riastrad int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
    766  1.1  riastrad 				 struct drm_file *file_priv)
    767  1.1  riastrad {
    768  1.1  riastrad 	struct drm_prime_handle *args = data;
    769  1.1  riastrad 	uint32_t flags;
    770  1.1  riastrad 
    771  1.1  riastrad 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
    772  1.1  riastrad 		return -EINVAL;
    773  1.1  riastrad 
    774  1.1  riastrad 	if (!dev->driver->prime_handle_to_fd)
    775  1.1  riastrad 		return -ENOSYS;
    776  1.1  riastrad 
    777  1.1  riastrad 	/* check flags are valid */
    778  1.1  riastrad 	if (args->flags & ~DRM_CLOEXEC)
    779  1.1  riastrad 		return -EINVAL;
    780  1.1  riastrad 
    781  1.1  riastrad 	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
    782  1.1  riastrad 	flags = args->flags & DRM_CLOEXEC;
    783  1.1  riastrad 
    784  1.1  riastrad 	return dev->driver->prime_handle_to_fd(dev, file_priv,
    785  1.1  riastrad 			args->handle, flags, &args->fd);
    786  1.1  riastrad }
    787  1.1  riastrad 
    788  1.1  riastrad int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
    789  1.1  riastrad 				 struct drm_file *file_priv)
    790  1.1  riastrad {
    791  1.1  riastrad 	struct drm_prime_handle *args = data;
    792  1.1  riastrad 
    793  1.1  riastrad 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
    794  1.1  riastrad 		return -EINVAL;
    795  1.1  riastrad 
    796  1.1  riastrad 	if (!dev->driver->prime_fd_to_handle)
    797  1.1  riastrad 		return -ENOSYS;
    798  1.1  riastrad 
    799  1.1  riastrad 	return dev->driver->prime_fd_to_handle(dev, file_priv,
    800  1.1  riastrad 			args->fd, &args->handle);
    801  1.1  riastrad }
    802  1.1  riastrad 
    803  1.2  riastrad /**
    804  1.2  riastrad  * drm_prime_pages_to_sg - converts a page array into an sg list
    805  1.2  riastrad  * @pages: pointer to the array of page pointers to convert
    806  1.2  riastrad  * @nr_pages: length of the page vector
    807  1.1  riastrad  *
     808  1.2  riastrad  * This helper creates an sg table object from a set of pages; the driver
     809  1.1  riastrad  * is responsible for mapping the pages into the importer's address space
     810  1.2  riastrad  * for use with dma_buf itself.
    811  1.1  riastrad  */
    812  1.2  riastrad struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
    813  1.1  riastrad {
    814  1.1  riastrad 	struct sg_table *sg = NULL;
    815  1.1  riastrad 	int ret;
    816  1.1  riastrad 
    817  1.1  riastrad 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    818  1.2  riastrad 	if (!sg) {
    819  1.2  riastrad 		ret = -ENOMEM;
    820  1.1  riastrad 		goto out;
    821  1.2  riastrad 	}
    822  1.1  riastrad 
    823  1.2  riastrad 	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
    824  1.2  riastrad 				nr_pages << PAGE_SHIFT, GFP_KERNEL);
    825  1.1  riastrad 	if (ret)
    826  1.1  riastrad 		goto out;
    827  1.1  riastrad 
    828  1.1  riastrad 	return sg;
    829  1.1  riastrad out:
    830  1.1  riastrad 	kfree(sg);
    831  1.2  riastrad 	return ERR_PTR(ret);
    832  1.1  riastrad }
    833  1.1  riastrad EXPORT_SYMBOL(drm_prime_pages_to_sg);
    834  1.1  riastrad 
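/*
 * Illustrative sketch, not part of the original file: a driver that
 * tracks its backing store as an array of struct page pointers can
 * implement .gem_prime_get_sg_table by handing that array to
 * drm_prime_pages_to_sg.  The example_* names are hypothetical.
 */
#if 0
static struct sg_table *
example_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct example_gem_object *bo = to_example_gem_object(obj);

	return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
}
#endif
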
    835  1.3  riastrad #ifdef __NetBSD__
    836  1.3  riastrad 
    837  1.3  riastrad struct sg_table *
    838  1.4  riastrad drm_prime_bus_dmamem_to_sg(bus_dma_segment_t *segs, int nsegs)
    839  1.4  riastrad {
    840  1.4  riastrad 	struct sg_table *sg;
    841  1.4  riastrad 	int ret;
    842  1.4  riastrad 
    843  1.4  riastrad 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
    844  1.4  riastrad 	if (sg == NULL) {
    845  1.4  riastrad 		ret = -ENOMEM;
    846  1.4  riastrad 		goto out;
    847  1.4  riastrad 	}
    848  1.4  riastrad 
    849  1.4  riastrad 	ret = sg_alloc_table_from_bus_dmamem(sg, segs, nsegs, GFP_KERNEL);
    850  1.4  riastrad 	if (ret)
    851  1.4  riastrad 		goto out;
    852  1.4  riastrad 
    853  1.4  riastrad 	return sg;
    854  1.4  riastrad out:
    855  1.4  riastrad 	kfree(sg);
    856  1.4  riastrad 	return ERR_PTR(ret);
    857  1.4  riastrad }
    858  1.4  riastrad 
    859  1.4  riastrad struct sg_table *
    860  1.3  riastrad drm_prime_pglist_to_sg(struct pglist *pglist, unsigned npages)
    861  1.3  riastrad {
    862  1.3  riastrad 	struct sg_table *sg;
    863  1.3  riastrad 	int ret;
    864  1.3  riastrad 
    865  1.3  riastrad 	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
    866  1.3  riastrad 	if (sg == NULL) {
    867  1.3  riastrad 		ret = -ENOMEM;
    868  1.3  riastrad 		goto out;
    869  1.3  riastrad 	}
    870  1.3  riastrad 
     871  1.3  riastrad 	ret = sg_alloc_table_from_pglist(sg, pglist, npages, 0,
     872  1.3  riastrad 	    npages << PAGE_SHIFT, GFP_KERNEL);
    873  1.3  riastrad 	if (ret)
    874  1.3  riastrad 		goto out;
    875  1.3  riastrad 
    876  1.3  riastrad 	return sg;
    877  1.3  riastrad 
    878  1.3  riastrad out:
    879  1.3  riastrad 	kfree(sg);
    880  1.3  riastrad 	return ERR_PTR(ret);
    881  1.3  riastrad }
    882  1.3  riastrad 
    883  1.4  riastrad bus_size_t
    884  1.4  riastrad drm_prime_sg_size(struct sg_table *sg)
    885  1.4  riastrad {
    886  1.4  riastrad 
    887  1.4  riastrad 	return sg->sgt_size;
    888  1.4  riastrad }
    889  1.4  riastrad 
    890  1.3  riastrad void
    891  1.3  riastrad drm_prime_sg_free(struct sg_table *sg)
    892  1.3  riastrad {
    893  1.3  riastrad 
    894  1.3  riastrad 	sg_free_table(sg);
    895  1.3  riastrad 	kfree(sg);
    896  1.3  riastrad }
    897  1.3  riastrad 
    898  1.3  riastrad int
    899  1.3  riastrad drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t dmat, bus_dmamap_t map,
    900  1.3  riastrad     struct sg_table *sgt)
    901  1.3  riastrad {
    902  1.3  riastrad 
    903  1.3  riastrad 	/* XXX errno NetBSD->Linux */
    904  1.3  riastrad 	return -bus_dmamap_load_raw(dmat, map, sgt->sgt_segs, sgt->sgt_nsegs,
    905  1.3  riastrad 	    sgt->sgt_size, BUS_DMA_NOWAIT);
    906  1.3  riastrad }
    907  1.3  riastrad 
    908  1.4  riastrad int
    909  1.4  riastrad drm_prime_bus_dmamem_map(bus_dma_tag_t dmat, struct sg_table *sgt, void **kvap,
    910  1.4  riastrad     int flags)
    911  1.4  riastrad {
    912  1.4  riastrad 
    913  1.4  riastrad 	return -bus_dmamem_map(dmat, sgt->sgt_segs, sgt->sgt_nsegs,
    914  1.4  riastrad 	    sgt->sgt_size, kvap, flags);
    915  1.4  riastrad }
    916  1.4  riastrad 
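/*
 * Illustrative sketch, not part of the original file: one plausible way
 * for a NetBSD driver to back .gem_prime_vmap for an imported object, by
 * mapping the segments recorded in its sg_table into kernel virtual
 * address space with the helper above.  The example_* names are
 * hypothetical.
 */
#if 0
static void *
example_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct example_gem_object *bo = to_example_gem_object(obj);
	void *kva;

	if (drm_prime_bus_dmamem_map(bo->dmat, bo->sgt, &kva,
		BUS_DMA_WAITOK | BUS_DMA_COHERENT))
		return NULL;
	return kva;
}
#endif
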
    917  1.3  riastrad #else  /* !__NetBSD__ */
    918  1.3  riastrad 
    919  1.2  riastrad /**
    920  1.2  riastrad  * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
    921  1.2  riastrad  * @sgt: scatter-gather table to convert
    922  1.2  riastrad  * @pages: array of page pointers to store the page array in
    923  1.2  riastrad  * @addrs: optional array to store the dma bus address of each page
    924  1.2  riastrad  * @max_pages: size of both the passed-in arrays
    925  1.2  riastrad  *
    926  1.2  riastrad  * Exports an sg table into an array of pages and addresses. This is currently
    927  1.2  riastrad  * required by the TTM driver in order to do correct fault handling.
    928  1.2  riastrad  */
    929  1.1  riastrad int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
    930  1.1  riastrad 				     dma_addr_t *addrs, int max_pages)
    931  1.1  riastrad {
    932  1.1  riastrad 	unsigned count;
    933  1.1  riastrad 	struct scatterlist *sg;
    934  1.1  riastrad 	struct page *page;
    935  1.2  riastrad 	u32 len;
    936  1.1  riastrad 	int pg_index;
    937  1.1  riastrad 	dma_addr_t addr;
    938  1.1  riastrad 
    939  1.1  riastrad 	pg_index = 0;
    940  1.1  riastrad 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
    941  1.1  riastrad 		len = sg->length;
    942  1.1  riastrad 		page = sg_page(sg);
    943  1.1  riastrad 		addr = sg_dma_address(sg);
    944  1.1  riastrad 
    945  1.1  riastrad 		while (len > 0) {
    946  1.1  riastrad 			if (WARN_ON(pg_index >= max_pages))
    947  1.1  riastrad 				return -1;
    948  1.1  riastrad 			pages[pg_index] = page;
    949  1.1  riastrad 			if (addrs)
    950  1.1  riastrad 				addrs[pg_index] = addr;
    951  1.1  riastrad 
    952  1.1  riastrad 			page++;
    953  1.1  riastrad 			addr += PAGE_SIZE;
    954  1.1  riastrad 			len -= PAGE_SIZE;
    955  1.1  riastrad 			pg_index++;
    956  1.1  riastrad 		}
    957  1.1  riastrad 	}
    958  1.1  riastrad 	return 0;
    959  1.1  riastrad }
    960  1.1  riastrad EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
    961  1.2  riastrad 
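/*
 * Illustrative sketch, not part of the original file: TTM-based drivers
 * (e.g. radeon) use this roughly as follows when populating a ttm_tt
 * from an imported sg table; the ttm/gtt names are driver-specific.
 */
#if 0
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
	    gtt->ttm.dma_address, ttm->num_pages);
#endif
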
    962  1.3  riastrad #endif	/* __NetBSD__ */
    963  1.3  riastrad 
    964  1.2  riastrad /**
    965  1.2  riastrad  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
    966  1.2  riastrad  * @obj: GEM object which was created from a dma-buf
    967  1.2  riastrad  * @sg: the sg-table which was pinned at import time
    968  1.2  riastrad  *
     969  1.2  riastrad  * This is the cleanup function which GEM drivers need to call when they use
    970  1.2  riastrad  * @drm_gem_prime_import to import dma-bufs.
    971  1.2  riastrad  */
    972  1.1  riastrad void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
    973  1.1  riastrad {
    974  1.1  riastrad 	struct dma_buf_attachment *attach;
    975  1.1  riastrad 	struct dma_buf *dma_buf;
    976  1.1  riastrad 	attach = obj->import_attach;
    977  1.1  riastrad 	if (sg)
    978  1.1  riastrad 		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
    979  1.1  riastrad 	dma_buf = attach->dmabuf;
    980  1.1  riastrad 	dma_buf_detach(attach->dmabuf, attach);
    981  1.1  riastrad 	/* remove the reference */
    982  1.1  riastrad 	dma_buf_put(dma_buf);
    983  1.1  riastrad }
    984  1.1  riastrad EXPORT_SYMBOL(drm_prime_gem_destroy);
    985  1.1  riastrad 
    986  1.1  riastrad void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
    987  1.1  riastrad {
    988  1.1  riastrad 	INIT_LIST_HEAD(&prime_fpriv->head);
    989  1.3  riastrad #ifdef __NetBSD__
    990  1.3  riastrad 	linux_mutex_init(&prime_fpriv->lock);
    991  1.3  riastrad #else
    992  1.1  riastrad 	mutex_init(&prime_fpriv->lock);
    993  1.3  riastrad #endif
    994  1.1  riastrad }
    995  1.1  riastrad 
    996  1.1  riastrad void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
    997  1.1  riastrad {
    998  1.2  riastrad 	/* by now drm_gem_release should've made sure the list is empty */
    999  1.2  riastrad 	WARN_ON(!list_empty(&prime_fpriv->head));
   1000  1.3  riastrad #ifdef __NetBSD__
   1001  1.3  riastrad 	linux_mutex_destroy(&prime_fpriv->lock);
   1002  1.3  riastrad #else
   1003  1.3  riastrad 	mutex_destroy(&prime_fpriv->lock);
   1004  1.3  riastrad #endif
   1005  1.1  riastrad }