/*	$NetBSD: drm_vm.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $	*/

/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vm.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $");

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <asm/pgtable.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

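/**
 * Bookkeeping entry for one userspace mapping: kept on drm_device::vmalist
 * together with the pid of the process that created the mapping.
 */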
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

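/*
 * Compute the page protection for an I/O map (registers or frame buffer).
 * Graphics memory must not be mapped encrypted, and on most architectures
 * it wants to be uncached or write-combined rather than mapped with the
 * default cacheable protection.
 */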
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

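/*
 * Compute the page protection for a DMA map.  Only PowerPC systems with a
 * non-cache-coherent DMA engine need anything other than the default
 * protection here.
 */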
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault information, including the faulting virtual memory area.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and, if it's AGP memory, find the real physical page
 * to map, get the page, increment the use count, and return it in
 * \c vmf->page.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp)
		goto vm_fault_error;

	if (!dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/**
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault information, including the faulting virtual memory area.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it in \c vmf->page.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* Ours was the only mapping of this map that was found */
	if (found_maps == 1 && (map->flags & _DRM_REMOVABLE)) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault information, including the faulting virtual memory area.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault information, including the faulting virtual memory area.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

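/*
 * Record a new userspace mapping on drm_device::vmalist.  The caller must
 * hold drm_device::struct_mutex.
 */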
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

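/*
 * \c open method for all virtual memory types: called when a VMA is
 * duplicated (e.g. across fork()), so take the lock and record the new
 * mapping.
 */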
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

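/*
 * Unlink and free the drm_vma_entry for \p vma.  The caller must hold
 * drm_device::struct_mutex.
 */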
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search for the \p vma private data entry in drm_device::vmalist, unlink
 * it, and free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * records the mapping via drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

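/*
 * Architecture fixup for register/frame buffer offsets: on Alpha, map
 * offsets must be biased by the hose's dense memory base; everywhere else
 * the map offset can be used as-is.
 */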
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap a DRM map.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally records the
 * mapping via drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU can't access the AGP
			 * aperture by bus address, so for maps of type
			 * _DRM_AGP we sort out the real physical pages and
			 * mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
#endif
		/* fall through - to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid the fault
		 * handler */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through - to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

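/**
 * mmap entry point for legacy (non-GEM) maps.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Bails out if the device has been unplugged, then calls drm_mmap_locked()
 * with drm_device::struct_mutex held.
 */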
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
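/*
 * Free every entry left on drm_device::vmalist during device teardown;
 * only legacy drivers ever populate the list.
 */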
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif
    679