      1 /*	$NetBSD: drm_gem.c,v 1.1.1.3 2018/08/27 01:34:42 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright © 2008 Intel Corporation
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     23  * IN THE SOFTWARE.
     24  *
     25  * Authors:
     26  *    Eric Anholt <eric@anholt.net>
     27  *
     28  */
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: drm_gem.c,v 1.1.1.3 2018/08/27 01:34:42 riastradh Exp $");
     32 
     33 #include <linux/types.h>
     34 #include <linux/slab.h>
     35 #include <linux/mm.h>
     36 #include <linux/uaccess.h>
     37 #include <linux/fs.h>
     38 #include <linux/file.h>
     39 #include <linux/module.h>
     40 #include <linux/mman.h>
     41 #include <linux/pagemap.h>
     42 #include <linux/shmem_fs.h>
     43 #include <linux/dma-buf.h>
     44 #include <drm/drmP.h>
     45 #include <drm/drm_vma_manager.h>
     46 #include <drm/drm_gem.h>
     47 #include "drm_internal.h"
     48 
     49 /** @file drm_gem.c
     50  *
     51  * This file provides some of the base ioctls and library routines for
     52  * the graphics memory manager implemented by each device driver.
     53  *
     54  * Because various devices have different requirements in terms of
     55  * synchronization and migration strategies, implementing that is left up to
     56  * the driver, and all that the general API provides should be generic --
     57  * allocating objects, reading/writing data with the cpu, freeing objects.
     58  * Even there, platform-dependent optimizations for reading/writing data with
     59  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
     60  * the DRI2 implementation wants to have at least allocate/mmap be generic.
     61  *
     62  * The goal was to have swap-backed object allocation managed through
     63  * struct file.  However, file descriptors as handles to a struct file have
     64  * two major failings:
     65  * - Process limits prevent more than 1024 or so being used at a time by
     66  *   default.
     67  * - Inability to allocate high fds will aggravate the X Server's select()
     68  *   handling, and likely that of many GL client applications as well.
     69  *
     70  * This led to a plan of using our own integer IDs (called handles, following
     71  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
     72  * ioctls.  The objects themselves will still include the struct file so
     73  * that we can transition to fds if the required kernel infrastructure shows
     74  * up at a later date, and as our interface with shmfs for memory allocation.
     75  */
     76 
     77 /*
     78  * We make up offsets for buffer objects so we can recognize them at
     79  * mmap time.
     80  */
     81 
     82 /* pgoff in mmap is an unsigned long, so we need to make sure that
     83  * the faked up offset will fit
     84  */
     85 
     86 #if BITS_PER_LONG == 64
     87 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
     88 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
     89 #else
     90 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
     91 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
     92 #endif
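
        /*
         * For example, with 4 KiB pages (PAGE_SHIFT == 12) the fake offsets on a
         * 64-bit kernel start at page 0x100000 (byte offset 4 GiB, just above the
         * full 32-bit range) and are handed out from a window of roughly 16M
         * pages, i.e. about 64 GiB of offset space.  On 32-bit the window starts
         * at byte offset 256 MiB and spans about 4 GiB.
         */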
     93 
     94 /**
     95  * drm_gem_init - Initialize the GEM device fields
     96  * @dev: drm_device structure to initialize
     97  */
     98 int
     99 drm_gem_init(struct drm_device *dev)
    100 {
    101 	struct drm_vma_offset_manager *vma_offset_manager;
    102 
    103 	mutex_init(&dev->object_name_lock);
    104 	idr_init(&dev->object_name_idr);
    105 
    106 	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
    107 	if (!vma_offset_manager) {
    108 		DRM_ERROR("out of memory\n");
    109 		return -ENOMEM;
    110 	}
    111 
    112 	dev->vma_offset_manager = vma_offset_manager;
    113 	drm_vma_offset_manager_init(vma_offset_manager,
    114 				    DRM_FILE_PAGE_OFFSET_START,
    115 				    DRM_FILE_PAGE_OFFSET_SIZE);
    116 
    117 	return 0;
    118 }
    119 
    120 void
    121 drm_gem_destroy(struct drm_device *dev)
    122 {
    123 
    124 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
    125 	kfree(dev->vma_offset_manager);
    126 	dev->vma_offset_manager = NULL;
    127 }
    128 
    129 /**
    130  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
    131  * @dev: drm_device the object should be initialized for
    132  * @obj: drm_gem_object to initialize
    133  * @size: object size
    134  *
    135  * Initialize an already allocated GEM object of the specified size with
    136  * shmfs backing store.
    137  */
    138 int drm_gem_object_init(struct drm_device *dev,
    139 			struct drm_gem_object *obj, size_t size)
    140 {
    141 	struct file *filp;
    142 
    143 	drm_gem_private_object_init(dev, obj, size);
    144 
    145 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
    146 	if (IS_ERR(filp))
    147 		return PTR_ERR(filp);
    148 
    149 	obj->filp = filp;
    150 
    151 	return 0;
    152 }
    153 EXPORT_SYMBOL(drm_gem_object_init);
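
        /*
         * Drivers normally embed struct drm_gem_object in a larger, driver-private
         * structure and initialize the embedded object with this helper before any
         * handle is exposed to userspace.  A rough sketch of such a create path
         * (struct my_obj and the surrounding names are hypothetical driver code):
         *
         *	struct my_obj {
         *		struct drm_gem_object base;
         *		...
         *	};
         *
         *	struct my_obj *bo;
         *	int ret;
         *
         *	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
         *	if (!bo)
         *		return -ENOMEM;
         *
         *	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
         *	if (ret) {
         *		kfree(bo);
         *		return ret;
         *	}
         *
         * After this the object is shmem-backed and ready for
         * drm_gem_handle_create().
         */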
    154 
    155 /**
    156  * drm_gem_private_object_init - initialize an allocated private GEM object
    157  * @dev: drm_device the object should be initialized for
    158  * @obj: drm_gem_object to initialize
    159  * @size: object size
    160  *
    161  * Initialize an already allocated GEM object of the specified size with
    162  * no GEM provided backing store. Instead the caller is responsible for
    163  * backing the object and handling it.
    164  */
    165 void drm_gem_private_object_init(struct drm_device *dev,
    166 				 struct drm_gem_object *obj, size_t size)
    167 {
    168 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
    169 
    170 	obj->dev = dev;
    171 	obj->filp = NULL;
    172 
    173 	kref_init(&obj->refcount);
    174 	obj->handle_count = 0;
    175 	obj->size = size;
    176 	drm_vma_node_reset(&obj->vma_node);
    177 }
    178 EXPORT_SYMBOL(drm_gem_private_object_init);
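
        /*
         * This is the path used for objects whose storage is provided elsewhere,
         * for instance imported dma-bufs or vram-backed objects.  A rough sketch
         * of an import callback (my_obj and the attachment handling are
         * hypothetical driver code):
         *
         *	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
         *	if (!bo)
         *		return ERR_PTR(-ENOMEM);
         *	drm_gem_private_object_init(dev, &bo->base, attach->dmabuf->size);
         *	... remember the attachment/sg table as the real backing store ...
         */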
    179 
    180 static void
    181 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
    182 {
    183 	/*
    184 	 * Note: obj->dma_buf can't disappear as long as we still hold a
    185 	 * handle reference in obj->handle_count.
    186 	 */
    187 	mutex_lock(&filp->prime.lock);
    188 	if (obj->dma_buf) {
    189 		drm_prime_remove_buf_handle_locked(&filp->prime,
    190 						   obj->dma_buf);
    191 	}
    192 	mutex_unlock(&filp->prime.lock);
    193 }
    194 
    195 /**
    196  * drm_gem_object_handle_free - release resources bound to userspace handles
    197  * @obj: GEM object to clean up.
    198  *
    199  * Called after the last handle to the object has been closed
    200  *
    201  * Removes any name for the object. Note that this must be
    202  * called before drm_gem_object_free or we'll be touching
    203  * freed memory
    204  */
    205 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
    206 {
    207 	struct drm_device *dev = obj->dev;
    208 
    209 	/* Remove any name for this object */
    210 	if (obj->name) {
    211 		idr_remove(&dev->object_name_idr, obj->name);
    212 		obj->name = 0;
    213 	}
    214 }
    215 
    216 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
    217 {
    218 	/* Unbreak the reference cycle if we have an exported dma_buf. */
    219 	if (obj->dma_buf) {
    220 		dma_buf_put(obj->dma_buf);
    221 		obj->dma_buf = NULL;
    222 	}
    223 }
    224 
    225 static void
    226 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
    227 {
    228 	if (WARN_ON(obj->handle_count == 0))
    229 		return;
    230 
    231 	/*
    232 	 * Must bump handle count first as this may be the last
    233 	 * ref, in which case the object would disappear before we
    234 	 * checked for a name
    235 	 */
    236 
    237 	mutex_lock(&obj->dev->object_name_lock);
    238 	if (--obj->handle_count == 0) {
    239 		drm_gem_object_handle_free(obj);
    240 		drm_gem_object_exported_dma_buf_free(obj);
    241 	}
    242 	mutex_unlock(&obj->dev->object_name_lock);
    243 
    244 	drm_gem_object_unreference_unlocked(obj);
    245 }
    246 
    247 /**
    248  * drm_gem_handle_delete - deletes the given file-private handle
    249  * @filp: drm file-private structure to use for the handle look up
    250  * @handle: userspace handle to delete
    251  *
    252  * Removes the GEM handle from the @filp lookup table and if this is the last
    253  * handle also cleans up linked resources like GEM names.
    254  */
    255 int
    256 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
    257 {
    258 	struct drm_device *dev;
    259 	struct drm_gem_object *obj;
    260 
    261 	/* This is gross. The idr system doesn't let us try a delete and
    262 	 * return an error code.  It just spews if you fail at deleting.
    263 	 * So, we have to grab a lock around finding the object and then
    264 	 * doing the delete on it and dropping the refcount, or the user
    265 	 * could race us to double-decrement the refcount and cause a
    266 	 * use-after-free later.  Given the frequency of our handle lookups,
    267 	 * we may want to use ida for number allocation and a hash table
    268 	 * for the pointers, anyway.
    269 	 */
    270 	spin_lock(&filp->table_lock);
    271 
    272 	/* Check if we currently have a reference on the object */
    273 	obj = idr_find(&filp->object_idr, handle);
    274 	if (obj == NULL) {
    275 		spin_unlock(&filp->table_lock);
    276 		return -EINVAL;
    277 	}
    278 	dev = obj->dev;
    279 
    280 	/* Release reference and decrement refcount. */
    281 	idr_remove(&filp->object_idr, handle);
    282 	spin_unlock(&filp->table_lock);
    283 
    284 	if (drm_core_check_feature(dev, DRIVER_PRIME))
    285 		drm_gem_remove_prime_handles(obj, filp);
    286 	drm_vma_node_revoke(&obj->vma_node, filp->filp);
    287 
    288 	if (dev->driver->gem_close_object)
    289 		dev->driver->gem_close_object(obj, filp);
    290 	drm_gem_object_handle_unreference_unlocked(obj);
    291 
    292 	return 0;
    293 }
    294 EXPORT_SYMBOL(drm_gem_handle_delete);
    295 
    296 /**
    297  * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
    298  * @file: drm file-private structure to remove the dumb handle from
    299  * @dev: corresponding drm_device
    300  * @handle: the dumb handle to remove
    301  *
    302  * This implements the ->dumb_destroy kms driver callback for drivers which use
    303  * gem to manage their backing storage.
    304  */
    305 int drm_gem_dumb_destroy(struct drm_file *file,
    306 			 struct drm_device *dev,
    307 			 uint32_t handle)
    308 {
    309 	return drm_gem_handle_delete(file, handle);
    310 }
    311 EXPORT_SYMBOL(drm_gem_dumb_destroy);
    312 
    313 /**
    314  * drm_gem_handle_create_tail - internal function to create a handle
    315  * @file_priv: drm file-private structure to register the handle for
    316  * @obj: object to register
    317  * @handlep: pointer to return the created handle to the caller
    318  *
    319  * This expects the dev->object_name_lock to be held already and will drop it
    320  * before returning. Used to avoid races in establishing new handles when
    321  * importing an object from either a flink name or a dma-buf.
    322  */
    323 int
    324 drm_gem_handle_create_tail(struct drm_file *file_priv,
    325 			   struct drm_gem_object *obj,
    326 			   u32 *handlep)
    327 {
    328 	struct drm_device *dev = obj->dev;
    329 	int ret;
    330 
    331 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
    332 
    333 	/*
    334 	 * Get the user-visible handle using idr.  Preload and perform
    335 	 * allocation under our spinlock.
    336 	 */
    337 	idr_preload(GFP_KERNEL);
    338 	spin_lock(&file_priv->table_lock);
    339 
    340 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
    341 	drm_gem_object_reference(obj);
    342 	obj->handle_count++;
    343 	spin_unlock(&file_priv->table_lock);
    344 	idr_preload_end();
    345 	mutex_unlock(&dev->object_name_lock);
    346 	if (ret < 0)
    347 		goto err_unref;
    348 
    349 	*handlep = ret;
    350 
    351 	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
    352 	if (ret)
    353 		goto err_remove;
    354 
    355 	if (dev->driver->gem_open_object) {
    356 		ret = dev->driver->gem_open_object(obj, file_priv);
    357 		if (ret)
    358 			goto err_revoke;
    359 	}
    360 
    361 	return 0;
    362 
    363 err_revoke:
    364 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
    365 err_remove:
    366 	spin_lock(&file_priv->table_lock);
    367 	idr_remove(&file_priv->object_idr, *handlep);
    368 	spin_unlock(&file_priv->table_lock);
    369 err_unref:
    370 	drm_gem_object_handle_unreference_unlocked(obj);
    371 	return ret;
    372 }
    373 
    374 /**
    375  * drm_gem_handle_create - create a gem handle for an object
    376  * @file_priv: drm file-private structure to register the handle for
    377  * @obj: object to register
    378  * @handlep: pointer to return the created handle to the caller
    379  *
    380  * Create a handle for this object. This adds a handle reference
    381  * to the object, which includes a regular reference count. Callers
    382  * will likely want to dereference the object afterwards.
    383  */
    384 int drm_gem_handle_create(struct drm_file *file_priv,
    385 			  struct drm_gem_object *obj,
    386 			  u32 *handlep)
    387 {
    388 	mutex_lock(&obj->dev->object_name_lock);
    389 
    390 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
    391 }
    392 EXPORT_SYMBOL(drm_gem_handle_create);
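
        /*
         * A create or import ioctl typically drops its own reference right after
         * the handle exists, since the handle then keeps the object alive.  A
         * rough sketch (args is the hypothetical ioctl argument struct):
         *
         *	u32 handle;
         *
         *	ret = drm_gem_handle_create(file_priv, obj, &handle);
         *	drm_gem_object_unreference_unlocked(obj);
         *	if (ret)
         *		return ret;
         *	args->handle = handle;
         *	return 0;
         */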
    393 
    394 
    395 /**
    396  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
    397  * @obj: obj in question
    398  *
    399  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
    400  */
    401 void
    402 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
    403 {
    404 	struct drm_device *dev = obj->dev;
    405 
    406 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
    407 }
    408 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
    409 
    410 /**
    411  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
    412  * @obj: obj in question
    413  * @size: the virtual size
    414  *
    415  * GEM memory mapping works by handing back to userspace a fake mmap offset
    416  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
    417  * up the object based on the offset and sets up the various memory mapping
    418  * structures.
    419  *
    420  * This routine allocates and attaches a fake offset for @obj, in cases where
    421  * the virtual size differs from the physical size (ie. obj->size).  Otherwise
    422  * just use drm_gem_create_mmap_offset().
    423  */
    424 int
    425 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
    426 {
    427 	struct drm_device *dev = obj->dev;
    428 
    429 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
    430 				  size / PAGE_SIZE);
    431 }
    432 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
    433 
    434 /**
    435  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
    436  * @obj: obj in question
    437  *
    438  * GEM memory mapping works by handing back to userspace a fake mmap offset
    439  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
    440  * up the object based on the offset and sets up the various memory mapping
    441  * structures.
    442  *
    443  * This routine allocates and attaches a fake offset for @obj.
    444  */
    445 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
    446 {
    447 	return drm_gem_create_mmap_offset_size(obj, obj->size);
    448 }
    449 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
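
        /*
         * A dumb_map_offset implementation, for example, usually only has to look
         * up the object, make sure a fake offset exists and report it back to
         * userspace (names other than the helpers called here are hypothetical):
         *
         *	obj = drm_gem_object_lookup(dev, file_priv, handle);
         *	if (obj == NULL)
         *		return -ENOENT;
         *	ret = drm_gem_create_mmap_offset(obj);
         *	if (ret == 0)
         *		*offset = drm_vma_node_offset_addr(&obj->vma_node);
         *	drm_gem_object_unreference_unlocked(obj);
         *	return ret;
         */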
    450 
    451 /**
    452  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
    453  * from shmem
    454  * @obj: obj in question
    455  *
    456  * This reads the page-array of the shmem-backing storage of the given gem
    457  * object. An array of pages is returned. If a page is not allocated or
    458  * swapped-out, this will allocate/swap-in the required pages. Note that the
    459  * whole object is covered by the page-array and pinned in memory.
    460  *
    461  * Use drm_gem_put_pages() to release the array and unpin all pages.
    462  *
    463  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
    464  * If you require other GFP-masks, you have to do those allocations yourself.
    465  *
    466  * Note that you are not allowed to change gfp-zones during runtime. That is,
    467  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
    468  * set during initialization. If you have special zone constraints, set them
    469  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
    470  * to keep pages in the required zone during swap-in.
    471  */
    472 struct page **drm_gem_get_pages(struct drm_gem_object *obj)
    473 {
    474 	struct address_space *mapping;
    475 	struct page *p, **pages;
    476 	int i, npages;
    477 
    478 	/* This is the shared memory object that backs the GEM resource */
    479 	mapping = file_inode(obj->filp)->i_mapping;
    480 
    481 	/* We already BUG_ON() for non-page-aligned sizes in
    482 	 * drm_gem_object_init(), so we should never hit this unless
    483 	 * the driver author is doing something really wrong:
    484 	 */
    485 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
    486 
    487 	npages = obj->size >> PAGE_SHIFT;
    488 
    489 	pages = drm_malloc_ab(npages, sizeof(struct page *));
    490 	if (pages == NULL)
    491 		return ERR_PTR(-ENOMEM);
    492 
    493 	for (i = 0; i < npages; i++) {
    494 		p = shmem_read_mapping_page(mapping, i);
    495 		if (IS_ERR(p))
    496 			goto fail;
    497 		pages[i] = p;
    498 
    499 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
    500 		 * correct region during swapin. Note that this requires
    501 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
    502 		 * so shmem can relocate pages during swapin if required.
    503 		 */
    504 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
    505 				(page_to_pfn(p) >= 0x00100000UL));
    506 	}
    507 
    508 	return pages;
    509 
    510 fail:
    511 	while (i--)
    512 		page_cache_release(pages[i]);
    513 
    514 	drm_free_large(pages);
    515 	return ERR_CAST(p);
    516 }
    517 EXPORT_SYMBOL(drm_gem_get_pages);
    518 
    519 /**
    520  * drm_gem_put_pages - helper to free backing pages for a GEM object
    521  * @obj: obj in question
    522  * @pages: pages to free
    523  * @dirty: if true, pages will be marked as dirty
    524  * @accessed: if true, the pages will be marked as accessed
    525  */
    526 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
    527 		bool dirty, bool accessed)
    528 {
    529 	int i, npages;
    530 
    531 	/* We already BUG_ON() for non-page-aligned sizes in
    532 	 * drm_gem_object_init(), so we should never hit this unless
    533 	 * the driver author is doing something really wrong:
    534 	 */
    535 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
    536 
    537 	npages = obj->size >> PAGE_SHIFT;
    538 
    539 	for (i = 0; i < npages; i++) {
    540 		if (dirty)
    541 			set_page_dirty(pages[i]);
    542 
    543 		if (accessed)
    544 			mark_page_accessed(pages[i]);
    545 
    546 		/* Undo the reference we took when populating the table */
    547 		page_cache_release(pages[i]);
    548 	}
    549 
    550 	drm_free_large(pages);
    551 }
    552 EXPORT_SYMBOL(drm_gem_put_pages);
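
        /*
         * drm_gem_get_pages() and drm_gem_put_pages() are intended to be used as a
         * pair, e.g. around building an sg table or a vmap of the backing store.
         * A rough sketch (whether dirty/accessed are set depends on what the
         * driver did with the pages):
         *
         *	pages = drm_gem_get_pages(obj);
         *	if (IS_ERR(pages))
         *		return PTR_ERR(pages);
         *	... use the pinned pages ...
         *	drm_gem_put_pages(obj, pages, true, false);
         */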
    553 
    554 /** Returns a reference to the object named by the handle. */
    555 struct drm_gem_object *
    556 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
    557 		      u32 handle)
    558 {
    559 	struct drm_gem_object *obj;
    560 
    561 	spin_lock(&filp->table_lock);
    562 
    563 	/* Check if we currently have a reference on the object */
    564 	obj = idr_find(&filp->object_idr, handle);
    565 	if (obj == NULL) {
    566 		spin_unlock(&filp->table_lock);
    567 		return NULL;
    568 	}
    569 
    570 	drm_gem_object_reference(obj);
    571 
    572 	spin_unlock(&filp->table_lock);
    573 
    574 	return obj;
    575 }
    576 EXPORT_SYMBOL(drm_gem_object_lookup);
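
        /*
         * The usual pattern in driver ioctls mirrors drm_gem_flink_ioctl() below:
         * look the handle up, work on the object while holding the returned
         * reference, then drop it (my_driver_op is hypothetical):
         *
         *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
         *	if (obj == NULL)
         *		return -ENOENT;
         *	ret = my_driver_op(obj);
         *	drm_gem_object_unreference_unlocked(obj);
         *	return ret;
         */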
    577 
    578 /**
    579  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
    580  * @dev: drm_device
    581  * @data: ioctl data
    582  * @file_priv: drm file-private structure
    583  *
    584  * Releases the handle to an mm object.
    585  */
    586 int
    587 drm_gem_close_ioctl(struct drm_device *dev, void *data,
    588 		    struct drm_file *file_priv)
    589 {
    590 	struct drm_gem_close *args = data;
    591 	int ret;
    592 
    593 	if (!drm_core_check_feature(dev, DRIVER_GEM))
    594 		return -ENODEV;
    595 
    596 	ret = drm_gem_handle_delete(file_priv, args->handle);
    597 
    598 	return ret;
    599 }
    600 
    601 /**
    602  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
    603  * @dev: drm_device
    604  * @data: ioctl data
    605  * @file_priv: drm file-private structure
    606  *
    607  * Create a global name for an object, returning the name.
    608  *
    609  * Note that the name does not hold a reference; when the object
    610  * is freed, the name goes away.
    611  */
    612 int
    613 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    614 		    struct drm_file *file_priv)
    615 {
    616 	struct drm_gem_flink *args = data;
    617 	struct drm_gem_object *obj;
    618 	int ret;
    619 
    620 	if (!drm_core_check_feature(dev, DRIVER_GEM))
    621 		return -ENODEV;
    622 
    623 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    624 	if (obj == NULL)
    625 		return -ENOENT;
    626 
    627 	mutex_lock(&dev->object_name_lock);
    628 	idr_preload(GFP_KERNEL);
    629 	/* prevent races with concurrent gem_close. */
    630 	if (obj->handle_count == 0) {
    631 		ret = -ENOENT;
    632 		goto err;
    633 	}
    634 
    635 	if (!obj->name) {
    636 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
    637 		if (ret < 0)
    638 			goto err;
    639 
    640 		obj->name = ret;
    641 	}
    642 
    643 	args->name = (uint64_t) obj->name;
    644 	ret = 0;
    645 
    646 err:
    647 	idr_preload_end();
    648 	mutex_unlock(&dev->object_name_lock);
    649 	drm_gem_object_unreference_unlocked(obj);
    650 	return ret;
    651 }
    652 
    653 /**
    654  * drm_gem_open - implementation of the GEM_OPEN ioctl
    655  * @dev: drm_device
    656  * @data: ioctl data
    657  * @file_priv: drm file-private structure
    658  *
    659  * Open an object using the global name, returning a handle and the size.
    660  *
    661  * This handle (of course) holds a reference to the object, so the object
    662  * will not go away until the handle is deleted.
    663  */
    664 int
    665 drm_gem_open_ioctl(struct drm_device *dev, void *data,
    666 		   struct drm_file *file_priv)
    667 {
    668 	struct drm_gem_open *args = data;
    669 	struct drm_gem_object *obj;
    670 	int ret;
    671 	u32 handle;
    672 
    673 	if (!drm_core_check_feature(dev, DRIVER_GEM))
    674 		return -ENODEV;
    675 
    676 	mutex_lock(&dev->object_name_lock);
    677 	obj = idr_find(&dev->object_name_idr, (int) args->name);
    678 	if (obj) {
    679 		drm_gem_object_reference(obj);
    680 	} else {
    681 		mutex_unlock(&dev->object_name_lock);
    682 		return -ENOENT;
    683 	}
    684 
    685 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
    686 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
    687 	drm_gem_object_unreference_unlocked(obj);
    688 	if (ret)
    689 		return ret;
    690 
    691 	args->handle = handle;
    692 	args->size = obj->size;
    693 
    694 	return 0;
    695 }
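
        /*
         * Seen from userspace the flink/open pair is roughly (error handling
         * omitted; fd_a and fd_b are two open DRM file descriptors and handle is
         * a GEM handle valid for fd_a):
         *
         *	struct drm_gem_flink flink = { .handle = handle };
         *	struct drm_gem_open op = { 0 };
         *
         *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
         *	op.name = flink.name;
         *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
         *
         * after which op.handle is a handle in fd_b's handle space and op.size is
         * the object size.
         */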
    696 
    697 /**
    698  * drm_gem_open - initializes GEM file-private structures at devnode open time
    699  * @dev: drm_device which is being opened by userspace
    700  * @file_private: drm file-private structure to set up
    701  *
    702  * Called at device open time, sets up the structure for handling refcounting
    703  * of mm objects.
    704  */
    705 void
    706 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
    707 {
    708 	idr_init(&file_private->object_idr);
    709 	spin_lock_init(&file_private->table_lock);
    710 }
    711 
    712 /*
    713  * Called at device close to release the file's
    714  * handle references on objects.
    715  */
    716 static int
    717 drm_gem_object_release_handle(int id, void *ptr, void *data)
    718 {
    719 	struct drm_file *file_priv = data;
    720 	struct drm_gem_object *obj = ptr;
    721 	struct drm_device *dev = obj->dev;
    722 
    723 	if (dev->driver->gem_close_object)
    724 		dev->driver->gem_close_object(obj, file_priv);
    725 
    726 	if (drm_core_check_feature(dev, DRIVER_PRIME))
    727 		drm_gem_remove_prime_handles(obj, file_priv);
    728 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
    729 
    730 	drm_gem_object_handle_unreference_unlocked(obj);
    731 
    732 	return 0;
    733 }
    734 
    735 /**
    736  * drm_gem_release - release file-private GEM resources
    737  * @dev: drm_device which is being closed by userspace
    738  * @file_private: drm file-private structure to clean up
    739  *
    740  * Called at close time when the filp is going away.
    741  *
    742  * Releases any remaining references on objects by this filp.
    743  */
    744 void
    745 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
    746 {
    747 	idr_for_each(&file_private->object_idr,
    748 		     &drm_gem_object_release_handle, file_private);
    749 	idr_destroy(&file_private->object_idr);
    750 }
    751 
    752 void
    753 drm_gem_object_release(struct drm_gem_object *obj)
    754 {
    755 	WARN_ON(obj->dma_buf);
    756 
    757 	if (obj->filp)
    758 		fput(obj->filp);
    759 
    760 	drm_gem_free_mmap_offset(obj);
    761 }
    762 EXPORT_SYMBOL(drm_gem_object_release);
    763 
    764 /**
    765  * drm_gem_object_free - free a GEM object
    766  * @kref: kref of the object to free
    767  *
    768  * Called after the last reference to the object has been lost.
    769  * Must be called holding struct_mutex
    770  *
    771  * Frees the object
    772  */
    773 void
    774 drm_gem_object_free(struct kref *kref)
    775 {
    776 	struct drm_gem_object *obj =
    777 		container_of(kref, struct drm_gem_object, refcount);
    778 	struct drm_device *dev = obj->dev;
    779 
    780 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
    781 
    782 	if (dev->driver->gem_free_object != NULL)
    783 		dev->driver->gem_free_object(obj);
    784 }
    785 EXPORT_SYMBOL(drm_gem_object_free);
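
        /*
         * A driver's gem_free_object callback usually tears down its own state,
         * releases the common GEM state with drm_gem_object_release() and frees
         * the containing structure, e.g. (my_obj and my_gem_free_object are
         * hypothetical):
         *
         *	static void my_gem_free_object(struct drm_gem_object *obj)
         *	{
         *		struct my_obj *bo = container_of(obj, struct my_obj, base);
         *
         *		drm_gem_object_release(obj);
         *		kfree(bo);
         *	}
         */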
    786 
    787 void drm_gem_vm_open(struct vm_area_struct *vma)
    788 {
    789 	struct drm_gem_object *obj = vma->vm_private_data;
    790 
    791 	drm_gem_object_reference(obj);
    792 }
    793 EXPORT_SYMBOL(drm_gem_vm_open);
    794 
    795 void drm_gem_vm_close(struct vm_area_struct *vma)
    796 {
    797 	struct drm_gem_object *obj = vma->vm_private_data;
    798 
    799 	drm_gem_object_unreference_unlocked(obj);
    800 }
    801 EXPORT_SYMBOL(drm_gem_vm_close);
    802 
    803 /**
    804  * drm_gem_mmap_obj - memory map a GEM object
    805  * @obj: the GEM object to map
    806  * @obj_size: the object size to be mapped, in bytes
    807  * @vma: VMA for the area to be mapped
    808  *
    809  * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
    810  * provided by the driver. Depending on their requirements, drivers can either
    811  * provide a fault handler in their gem_vm_ops (in which case any accesses to
    812  * the object will be trapped, to perform migration, GTT binding, surface
    813  * register allocation, or performance monitoring), or mmap the buffer memory
    814  * synchronously after calling drm_gem_mmap_obj.
    815  *
    816  * This function is mainly intended to implement the DMABUF mmap operation, when
    817  * the GEM object is not looked up based on its fake offset. To implement the
    818  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
    819  *
    820  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
    821  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
    822  * callers must verify access restrictions before calling this helper.
    823  *
    824  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
    825  * size, or if no gem_vm_ops are provided.
    826  */
    827 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
    828 		     struct vm_area_struct *vma)
    829 {
    830 	struct drm_device *dev = obj->dev;
    831 
    832 	/* Check for valid size. */
    833 	if (obj_size < vma->vm_end - vma->vm_start)
    834 		return -EINVAL;
    835 
    836 	if (!dev->driver->gem_vm_ops)
    837 		return -EINVAL;
    838 
    839 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
    840 	vma->vm_ops = dev->driver->gem_vm_ops;
    841 	vma->vm_private_data = obj;
    842 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
    843 
    844 	/* Take a ref for this mapping of the object, so that the fault
    845 	 * handler can dereference the mmap offset's pointer to the object.
    846 	 * This reference is cleaned up by the corresponding vm_close
    847 	 * (which should happen whether the vma was created by this call, or
    848 	 * by a vm_open due to mremap or partial unmap or whatever).
    849 	 */
    850 	drm_gem_object_reference(obj);
    851 
    852 	return 0;
    853 }
    854 EXPORT_SYMBOL(drm_gem_mmap_obj);
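
        /*
         * For example, a driver's dma-buf .mmap callback can forward here once it
         * has recovered the GEM object behind the dma_buf (obj_from_dma_buf() is a
         * stand-in for driver code):
         *
         *	struct drm_gem_object *obj = obj_from_dma_buf(dma_buf);
         *
         *	return drm_gem_mmap_obj(obj, obj->size, vma);
         */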
    855 
    856 /**
    857  * drm_gem_mmap - memory map routine for GEM objects
    858  * @filp: DRM file pointer
    859  * @vma: VMA for the area to be mapped
    860  *
    861  * If a driver supports GEM object mapping, mmap calls on the DRM file
    862  * descriptor will end up here.
    863  *
    864  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
    865  * contain the fake offset we created when the GTT map ioctl was called on
    866  * the object) and map it with a call to drm_gem_mmap_obj().
    867  *
    868  * If the caller is not granted access to the buffer object, the mmap will fail
    869  * with EACCES. Please see the vma manager for more information.
    870  */
    871 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
    872 {
    873 	struct drm_file *priv = filp->private_data;
    874 	struct drm_device *dev = priv->minor->dev;
    875 	struct drm_gem_object *obj = NULL;
    876 	struct drm_vma_offset_node *node;
    877 	int ret;
    878 
    879 	if (drm_device_is_unplugged(dev))
    880 		return -ENODEV;
    881 
    882 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
    883 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
    884 						  vma->vm_pgoff,
    885 						  vma_pages(vma));
    886 	if (likely(node)) {
    887 		obj = container_of(node, struct drm_gem_object, vma_node);
    888 		/*
    889 		 * When the object is being freed, after it hits 0-refcnt it
    890 		 * proceeds to tear down the object. In the process it will
    891 		 * attempt to remove the VMA offset and so acquire this
    892 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
    893 		 * that matches our range, we know it is in the process of being
    894 		 * destroyed and will be freed as soon as we release the lock -
    895 		 * so we have to check for the 0-refcnted object and treat it as
    896 		 * invalid.
    897 		 */
    898 		if (!kref_get_unless_zero(&obj->refcount))
    899 			obj = NULL;
    900 	}
    901 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
    902 
    903 	if (!obj)
    904 		return -EINVAL;
    905 
    906 	if (!drm_vma_node_is_allowed(node, filp)) {
    907 		drm_gem_object_unreference_unlocked(obj);
    908 		return -EACCES;
    909 	}
    910 
    911 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
    912 			       vma);
    913 
    914 	drm_gem_object_unreference_unlocked(obj);
    915 
    916 	return ret;
    917 }
    918 EXPORT_SYMBOL(drm_gem_mmap);
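
        /*
         * Drivers install this as the .mmap handler in their file_operations
         * (my_driver_fops is hypothetical; the other hooks shown are the standard
         * DRM file entry points):
         *
         *	static const struct file_operations my_driver_fops = {
         *		.owner		= THIS_MODULE,
         *		.open		= drm_open,
         *		.release	= drm_release,
         *		.unlocked_ioctl	= drm_ioctl,
         *		.mmap		= drm_gem_mmap,
         *		.poll		= drm_poll,
         *		.read		= drm_read,
         *		.llseek		= noop_llseek,
         *	};
         */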
    919