/*	$NetBSD: ttm_bo_vm.c,v 1.3 2021/12/18 23:45:44 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.3 2021/12/18 23:45:44 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	int err = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (unlikely(err != 0)) {
		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}

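/*
 * Example (sketch, not part of TTM): a driver may override the default
 * pfn computation above by supplying an io_mem_pfn callback, e.g. to
 * account for its own aperture layout.  The mydrv_* names are
 * hypothetical; bo->mem.start is the placement offset in pages.
 *
 *	static unsigned long mydrv_io_mem_pfn(struct ttm_buffer_object *bo,
 *					      unsigned long page_offset)
 *	{
 *		struct mydrv_device *mdev = mydrv_from_bdev(bo->bdev);
 *
 *		return (mdev->vram_base >> PAGE_SHIFT) +
 *			bo->mem.start + page_offset;
 *	}
 */
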
/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow the mmap_sem to be dropped
 * during long waits, after which the callback is restarted. This allows
 * other threads sharing the same virtual memory space to concurrently
 * map() and unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for the GPU and should therefore be
 * considered long waits. This function reserves the buffer object
 * interruptibly, taking that into account. Starvation is avoided by the
 * vm system not allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if a blocking wait was required.
 *    VM_FAULT_NOPAGE if a blocking wait was required but retrying was not
 *    allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);

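/*
 * Example (sketch, not TTM code): a minimal driver-customized mkwrite()
 * handler built on ttm_bo_vm_reserve(), as suggested in the kernel-doc
 * above.  mydrv_mark_dirty() is a hypothetical driver helper for dirty
 * tracking.
 *
 *	static vm_fault_t mydrv_bo_vm_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		mydrv_mark_dirty(bo);
 *		dma_resv_unlock(bo->base.resv);
 *		return 0;
 *	}
 */
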
/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a code instructing
 * the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if a retryable wait was performed
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return VM_FAULT_SIGBUS;

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			return VM_FAULT_NOPAGE;
		default:
			return VM_FAULT_SIGBUS;
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0))
		return VM_FAULT_NOPAGE;
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	} else {
		/* Iomem should not be marked encrypted. */
		prot = pgprot_decrypted(prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->mem.bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->base.vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_mixed_prot() for a discussion.
		 */
		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vmf_insert_mixed_prot(vma, address,
						    __pfn_to_pfn_t(pfn, PFN_DEV),
						    prot);
		else
			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs. */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				goto out_io_unlock;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

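/*
 * Example (sketch, not TTM code): a driver-customized fault() handler
 * that mirrors ttm_bo_vm_fault() above but chooses its own page
 * protection and prefault count, which is why the two helpers are
 * exported separately.  MYDRV_NUM_PREFAULT is a hypothetical driver
 * constant.
 *
 *	static vm_fault_t mydrv_bo_vm_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *					       MYDRV_NUM_PREFAULT);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */
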
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, so that no extra virtual address
	 * mapping is needed.
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	unsigned long offset = addr - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0)) {
				/* Don't leak the reservation on error. */
				ttm_bo_unreserve(bo);
				return ret;
			}
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);

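/*
 * Example (sketch, not TTM code): for placements that cannot be kmapped
 * by the helper above (e.g. VRAM), a driver can supply an access_memory
 * callback with the signature used in the default case; a trivial one
 * for a linearly mapped aperture might look like this.  The mydrv_*
 * names are hypothetical.
 *
 *	static int mydrv_access_memory(struct ttm_buffer_object *bo,
 *				       unsigned long offset, void *buf,
 *				       int len, int write)
 *	{
 *		struct mydrv_device *mdev = mydrv_from_bdev(bo->bdev);
 *		void __iomem *ptr = mdev->vram_mmio +
 *			(bo->mem.start << PAGE_SHIFT) + offset;
 *
 *		if (write)
 *			memcpy_toio(ptr, buf, len);
 *		else
 *			memcpy_fromio(buf, ptr, len);
 *		return len;
 *	}
 */
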
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object,
				  base.vma_node);
		bo = ttm_bo_get_unless_zero(bo);
	}

	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
{
	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719.
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

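/*
 * Example (sketch, not TTM code): a driver typically wires ttm_bo_mmap()
 * into its file_operations mmap hook.  mydrv_from_file() is a
 * hypothetical helper recovering the driver device from the file.
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *mdev = mydrv_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 */
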
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);
	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
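
/*
 * Example (sketch, not TTM code): ttm_bo_mmap_obj() suits paths where the
 * buffer object has already been looked up and access-checked, such as a
 * GEM object mmap callback.  mydrv_gem_to_bo() is a hypothetical
 * conversion helper.
 *
 *	static int mydrv_gem_mmap(struct drm_gem_object *obj,
 *				  struct vm_area_struct *vma)
 *	{
 *		return ttm_bo_mmap_obj(vma, mydrv_gem_to_bo(obj));
 *	}
 */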