Home | History | Annotate | Line # | Download | only in vgem
      1 /*	$NetBSD: vgem_drv.c,v 1.3 2021/12/18 23:45:44 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2011 Red Hat, Inc.
      5  * Copyright  2014 The Chromium OS Authors
      6  *
      7  * Permission is hereby granted, free of charge, to any person obtaining a
      8  * copy of this software and associated documentation files (the "Software")
      9  * to deal in the software without restriction, including without limitation
     10  * on the rights to use, copy, modify, merge, publish, distribute, sub
     11  * license, and/or sell copies of the Software, and to permit persons to whom
     12  * them Software is furnished to do so, subject to the following conditions:
     13  *
     14  * The above copyright notice and this permission notice (including the next
     15  * paragraph) shall be included in all copies or substantial portions of the
     16  * Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
     21  * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
     22  * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
     23  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     24  *
     25  * Authors:
     26  *	Adam Jackson <ajax (at) redhat.com>
     27  *	Ben Widawsky <ben (at) bwidawsk.net>
     28  */
     29 
     30 /**
     31  * This is vgem, a (non-hardware-backed) GEM service.  This is used by Mesa's
     32  * software renderer and the X server for efficient buffer sharing.
     33  */
     34 
     35 #include <sys/cdefs.h>
     36 __KERNEL_RCSID(0, "$NetBSD: vgem_drv.c,v 1.3 2021/12/18 23:45:44 riastradh Exp $");
     37 
     38 #include <linux/dma-buf.h>
     39 #include <linux/module.h>
     40 #include <linux/platform_device.h>
     41 #include <linux/shmem_fs.h>
     42 #include <linux/vmalloc.h>
     43 
     44 #include <drm/drm_drv.h>
     45 #include <drm/drm_file.h>
     46 #include <drm/drm_ioctl.h>
     47 #include <drm/drm_prime.h>
     48 
     49 #include "vgem_drv.h"
     50 
     51 #define DRIVER_NAME	"vgem"
     52 #define DRIVER_DESC	"Virtual GEM provider"
     53 #define DRIVER_DATE	"20120112"
     54 #define DRIVER_MAJOR	1
     55 #define DRIVER_MINOR	0
     56 
/* Singleton driver state: the virtual DRM device embedded alongside the
 * platform device that anchors DMA mapping for prime import/export. */
static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;
     61 
     62 static void vgem_gem_free_object(struct drm_gem_object *obj)
     63 {
     64 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
     65 
     66 	kvfree(vgem_obj->pages);
     67 	mutex_destroy(&vgem_obj->pages_lock);
     68 
     69 	if (obj->import_attach)
     70 		drm_prime_gem_destroy(obj, vgem_obj->table);
     71 
     72 	drm_gem_object_release(obj);
     73 	kfree(vgem_obj);
     74 }
     75 
/*
 * Page-fault handler for mmap()ed vgem objects.  Resolves the faulting
 * address to a page of the object's shmem backing store, preferring the
 * pinned page array when the object is currently pinned (e.g. exported).
 */
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;
	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;

	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	/* Refuse faults beyond the end of the object. */
	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	/* Fast path: if the page array is materialized (pinned), hand out
	 * the resident page with an extra reference. */
	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		/* Slow path: read (possibly swap in) the page from the
		 * shmem mapping that backs the object. */
		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				/* Transient contention: ask the MM to retry. */
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				/* Unexpected shmem error: warn, fail fault. */
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}

	}
	return ret;
}
    129 
/* VM operations for mmap()ed vgem objects; open/close maintain the GEM
 * object reference across VMA duplication and teardown. */
static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
    135 
    136 static int vgem_open(struct drm_device *dev, struct drm_file *file)
    137 {
    138 	struct vgem_file *vfile;
    139 	int ret;
    140 
    141 	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
    142 	if (!vfile)
    143 		return -ENOMEM;
    144 
    145 	file->driver_priv = vfile;
    146 
    147 	ret = vgem_fence_open(vfile);
    148 	if (ret) {
    149 		kfree(vfile);
    150 		return ret;
    151 	}
    152 
    153 	return 0;
    154 }
    155 
    156 static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
    157 {
    158 	struct vgem_file *vfile = file->driver_priv;
    159 
    160 	vgem_fence_close(vfile);
    161 	kfree(vfile);
    162 }
    163 
    164 static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
    165 						unsigned long size)
    166 {
    167 	struct drm_vgem_gem_object *obj;
    168 	int ret;
    169 
    170 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    171 	if (!obj)
    172 		return ERR_PTR(-ENOMEM);
    173 
    174 	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
    175 	if (ret) {
    176 		kfree(obj);
    177 		return ERR_PTR(ret);
    178 	}
    179 
    180 	mutex_init(&obj->pages_lock);
    181 
    182 	return obj;
    183 }
    184 
/* Undo __vgem_gem_create() for an object that never received a handle. */
static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}
    190 
    191 static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
    192 					      struct drm_file *file,
    193 					      unsigned int *handle,
    194 					      unsigned long size)
    195 {
    196 	struct drm_vgem_gem_object *obj;
    197 	int ret;
    198 
    199 	obj = __vgem_gem_create(dev, size);
    200 	if (IS_ERR(obj))
    201 		return ERR_CAST(obj);
    202 
    203 	ret = drm_gem_handle_create(file, &obj->base, handle);
    204 	if (ret) {
    205 		drm_gem_object_put_unlocked(&obj->base);
    206 		return ERR_PTR(ret);
    207 	}
    208 
    209 	return &obj->base;
    210 }
    211 
    212 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
    213 				struct drm_mode_create_dumb *args)
    214 {
    215 	struct drm_gem_object *gem_object;
    216 	u64 pitch, size;
    217 
    218 	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
    219 	size = args->height * pitch;
    220 	if (size == 0)
    221 		return -EINVAL;
    222 
    223 	gem_object = vgem_gem_create(dev, file, &args->handle, size);
    224 	if (IS_ERR(gem_object))
    225 		return PTR_ERR(gem_object);
    226 
    227 	args->size = gem_object->size;
    228 	args->pitch = pitch;
    229 
    230 	drm_gem_object_put_unlocked(gem_object);
    231 
    232 	DRM_DEBUG("Created object of size %llu\n", args->size);
    233 
    234 	return 0;
    235 }
    236 
    237 static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
    238 			     uint32_t handle, uint64_t *offset)
    239 {
    240 	struct drm_gem_object *obj;
    241 	int ret;
    242 
    243 	obj = drm_gem_object_lookup(file, handle);
    244 	if (!obj)
    245 		return -ENOENT;
    246 
    247 	if (!obj->filp) {
    248 		ret = -EINVAL;
    249 		goto unref;
    250 	}
    251 
    252 	ret = drm_gem_create_mmap_offset(obj);
    253 	if (ret)
    254 		goto unref;
    255 
    256 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
    257 unref:
    258 	drm_gem_object_put_unlocked(obj);
    259 
    260 	return ret;
    261 }
    262 
/* Driver-private ioctls: fence attach/signal, permitted on render nodes. */
static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
    267 
/*
 * mmap entry point: let drm_gem_mmap() do the setup, then restore the
 * original VMA flags (plus DONTEXPAND/DONTDUMP) because our faults insert
 * ordinary pages rather than special PFN mappings.
 */
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Snapshot the flags before drm_gem_mmap() modifies them. */
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
    283 
/* Character-device file operations: stock DRM entry points except for
 * mmap, which post-processes the VMA flags (see vgem_mmap()). */
static const struct file_operations vgem_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= vgem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.release	= drm_release,
};
    294 
/*
 * Pin the object's backing pages, materializing the page array on the
 * first pin; nested pins share it via pages_pin_count (all under
 * pages_lock).  Returns the page array or an ERR_PTR on failure.
 */
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			/* Roll back the optimistic pin count. */
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}
    314 
/* Drop one pin; on the last unpin, return the pages to the backing store
 * marked dirty and accessed, and forget the array. */
static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}
    324 
    325 static int vgem_prime_pin(struct drm_gem_object *obj)
    326 {
    327 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
    328 	long n_pages = obj->size >> PAGE_SHIFT;
    329 	struct page **pages;
    330 
    331 	pages = vgem_pin_pages(bo);
    332 	if (IS_ERR(pages))
    333 		return PTR_ERR(pages);
    334 
    335 	/* Flush the object from the CPU cache so that importers can rely
    336 	 * on coherent indirect access via the exported dma-address.
    337 	 */
    338 	drm_clflush_pages(pages, n_pages);
    339 
    340 	return 0;
    341 }
    342 
/* Prime export teardown: release the pin taken by vgem_prime_pin(). */
static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}
    349 
/* Build a scatterlist over the page array for a dma-buf importer.
 * NOTE(review): relies on bo->pages being populated, presumably by a
 * prior vgem_prime_pin() from the export path — verify against callers. */
static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}
    356 
/* Import a dma-buf, routing DMA mapping through our platform device
 * (which carries the DMA mask set up in vgem_init()). */
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}
    364 
/*
 * Import hook: wrap the supplied sg_table in a new vgem object whose page
 * array is filled from the scatterlist and permanently pinned (imported
 * pages are owned by the exporter, never by our shmem store).
 */
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					npages);
	return &obj->base;
}
    389 
    390 static void *vgem_prime_vmap(struct drm_gem_object *obj)
    391 {
    392 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
    393 	long n_pages = obj->size >> PAGE_SHIFT;
    394 	struct page **pages;
    395 
    396 	pages = vgem_pin_pages(bo);
    397 	if (IS_ERR(pages))
    398 		return NULL;
    399 
    400 	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
    401 }
    402 
/* dma-buf vunmap hook: tear down the mapping created by vgem_prime_vmap()
 * and drop the matching pin. */
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}
    410 
/*
 * dma-buf mmap hook: map the object on behalf of an importer by
 * delegating to the shmem file backing it, then retarget the VMA at that
 * file with write-combining protection.  Returns 0 or a negative errno.
 */
static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	/* Refuse mappings larger than the object itself. */
	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Without a shmem backing file there is nothing to map. */
	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	/* Swap the VMA's file reference: drop the one installed by the
	 * mmap machinery and take one on the backing shmem file. */
	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}
    433 
/* drm_device release callback (last reference dropped): unregister the
 * paired platform device and free the containing vgem_device. */
static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}
    443 
/* DRM driver description: a GEM-only render driver with no modesetting;
 * everything beyond dumb buffers and fences goes through prime. */
static struct drm_driver vgem_driver = {
	.driver_features		= DRIVER_GEM | DRIVER_RENDER,
	.release			= vgem_release,
	.open				= vgem_open,
	.postclose			= vgem_postclose,
	.gem_free_object_unlocked	= vgem_gem_free_object,
	.gem_vm_ops			= &vgem_gem_vm_ops,
	.ioctls				= vgem_ioctls,
	.num_ioctls 			= ARRAY_SIZE(vgem_ioctls),
	.fops				= &vgem_driver_fops,

	.dumb_create			= vgem_gem_dumb_create,
	.dumb_map_offset		= vgem_gem_dumb_map,

	/* prime import/export plumbing */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_unpin = vgem_prime_unpin,
	.gem_prime_import = vgem_prime_import,
	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name	= DRIVER_NAME,
	.desc	= DRIVER_DESC,
	.date	= DRIVER_DATE,
	.major	= DRIVER_MAJOR,
	.minor	= DRIVER_MINOR,
};
    475 
    476 static int __init vgem_init(void)
    477 {
    478 	int ret;
    479 
    480 	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
    481 	if (!vgem_device)
    482 		return -ENOMEM;
    483 
    484 	vgem_device->platform =
    485 		platform_device_register_simple("vgem", -1, NULL, 0);
    486 	if (IS_ERR(vgem_device->platform)) {
    487 		ret = PTR_ERR(vgem_device->platform);
    488 		goto out_free;
    489 	}
    490 
    491 	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
    492 				     DMA_BIT_MASK(64));
    493 	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
    494 			   &vgem_device->platform->dev);
    495 	if (ret)
    496 		goto out_unregister;
    497 
    498 	/* Final step: expose the device/driver to userspace */
    499 	ret  = drm_dev_register(&vgem_device->drm, 0);
    500 	if (ret)
    501 		goto out_fini;
    502 
    503 	return 0;
    504 
    505 out_fini:
    506 	drm_dev_fini(&vgem_device->drm);
    507 out_unregister:
    508 	platform_device_unregister(vgem_device->platform);
    509 out_free:
    510 	kfree(vgem_device);
    511 	return ret;
    512 }
    513 
/* Module unload: withdraw the device from userspace, then drop the final
 * reference; vgem_release() performs the actual teardown. */
static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_put(&vgem_device->drm);
}
    519 
/* Module registration and metadata. */
module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
    527