/*	$NetBSD: vmwgfx_page_dirty.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_page_dirty.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");

#include "vmwgfx_drv.h"

/*
 * Different methods for tracking dirty:
 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 * accesses in the VM mkwrite() callback
 */
enum vmw_bo_dirty_method {
	VMW_BO_DIRTY_PAGETABLE,
	VMW_BO_DIRTY_MKWRITE,
};

/*
 * A scan that finds no dirtied pages triggers a transition to the _MKWRITE
 * method; similarly, a scan that finds more than a certain percentage of
 * dirty pages triggers a transition to the _PAGETABLE method. How many such
 * triggers should we wait for before changing method?
 */
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2

/* Percentage to trigger a transition to the _PAGETABLE method */
#define VMW_DIRTY_PERCENTAGE 10

/**
 * struct vmw_bo_dirty - Dirty information for buffer objects
 * @start: First currently dirty bit
 * @end: Last currently dirty bit + 1
 * @method: The currently used dirty method
 * @change_count: Number of consecutive method change triggers
 * @ref_count: Reference count for this structure
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
 * @size: The accounting size for this struct.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
 */
struct vmw_bo_dirty {
	unsigned long start;
	unsigned long end;
	enum vmw_bo_dirty_method method;
	unsigned int change_count;
	unsigned int ref_count;
	unsigned long bitmap_size;
	size_t size;
	unsigned long bitmap[0];
};
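
/*
 * Note: (start, end) brackets the pages known to be dirty; start ==
 * bitmap_size and end == 0 (as set up in vmw_bo_dirty_add()) encodes an
 * empty range.  @bitmap is a zero-length array used as a flexible array
 * member, sized at allocation time to hold one bit per page of the bo.
 */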

/**
 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 * @vbo: The buffer object to scan
 *
 * Scans the pagetable for dirty bits. Clears those bits and modifies the
 * dirty structure with the results. This function may change the
 * dirty-tracking method.
 */
static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	num_marked = clean_record_shared_mapping_range
		(mapping,
		 offset, dirty->bitmap_size,
		 offset, &dirty->bitmap[0],
		 &dirty->start, &dirty->end);
	if (num_marked == 0)
		dirty->change_count++;
	else
		dirty->change_count = 0;

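	/*
	 * Too many consecutive empty scans: switch to the mkwrite method.
	 * Write-protect the whole range so that future writes fault, and
	 * record into the bitmap anything dirtied in the meantime.
	 */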
	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->change_count = 0;
		dirty->method = VMW_BO_DIRTY_MKWRITE;
		wp_shared_mapping_range(mapping,
					offset, dirty->bitmap_size);
		clean_record_shared_mapping_range(mapping,
						  offset, dirty->bitmap_size,
						  offset, &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}
}

/**
 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
 * @vbo: The buffer object to scan
 *
 * Write-protect pages written to so that consecutive write accesses will
 * trigger a call to mkwrite.
 *
 * This function may change the dirty-tracking method.
 */
static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	if (dirty->end <= dirty->start)
		return;

	num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
					dirty->start + offset,
					dirty->end - dirty->start);

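	/*
	 * Integer percentage check: with, say, a 512-page bitmap the
	 * threshold below is only exceeded once at least 57 pages (about
	 * 11%) had to be re-write-protected in a single scan, since the
	 * division truncates.
	 */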
	if (100UL * num_marked / dirty->bitmap_size >
	    VMW_DIRTY_PERCENTAGE) {
		dirty->change_count++;
	} else {
		dirty->change_count = 0;
	}

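	/*
	 * Enough consecutive busy scans: switch back to the pagetable
	 * method.  Clean any remaining page-table dirty bits, then
	 * conservatively mark the whole currently tracked [start, end)
	 * span as dirty in the bitmap.
	 */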
	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		pgoff_t start = 0;
		pgoff_t end = dirty->bitmap_size;

		dirty->method = VMW_BO_DIRTY_PAGETABLE;
		clean_record_shared_mapping_range(mapping, offset, end, offset,
						  &dirty->bitmap[0],
						  &start, &end);
		bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
		if (dirty->start < dirty->end)
			bitmap_set(&dirty->bitmap[0], dirty->start,
				   dirty->end - dirty->start);
		dirty->change_count = 0;
	}
}

/**
 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
 * tracking structure
 * @vbo: The buffer object to scan
 *
 * This function may change the dirty tracking method.
 */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
		vmw_bo_dirty_scan_pagetable(vbo);
	else
		vmw_bo_dirty_scan_mkwrite(vbo);
}

/**
 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
 * an unmap_mapping_range operation.
 * @vbo: The buffer object,
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * If we're using the _PAGETABLE scan method, we may leak dirty pages
 * when calling unmap_mapping_range(). This function makes sure we pick
 * up all dirty pages.
 */
static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
				   pgoff_t start, pgoff_t end)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
		return;

	wp_shared_mapping_range(mapping, start + offset, end - start);
	clean_record_shared_mapping_range(mapping, start + offset,
					  end - start, offset,
					  &dirty->bitmap[0], &dirty->start,
					  &dirty->end);
}

/**
 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
 * @vbo: The buffer object,
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
 */
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end)
{
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	vmw_bo_dirty_pre_unmap(vbo, start, end);
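	/*
	 * unmap_shared_mapping_range() takes byte offsets and lengths,
	 * hence the page-to-byte conversions below.
	 */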
	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
				   (loff_t) (end - start) << PAGE_SHIFT);
}

/**
 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
 * @vbo: The buffer object
 *
 * This function registers a dirty-tracking user to a buffer object.
 * A user can be for example a resource or a vma in a special user-space
 * mapping.
 *
 * Return: Zero on success, -ENOMEM on memory allocation failure.
 */
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t num_pages = vbo->base.num_pages;
	size_t size, acc_size;
	int ret;
	static struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	if (dirty) {
		dirty->ref_count++;
		return 0;
	}

	size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
	acc_size = ttm_round_pot(size);
	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (ret) {
		VMW_DEBUG_USER("Out of graphics memory for buffer object "
			       "dirty tracker.\n");
		return ret;
	}
	dirty = kvzalloc(size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	dirty->size = acc_size;
	dirty->bitmap_size = num_pages;
	dirty->start = dirty->bitmap_size;
	dirty->end = 0;
	dirty->ref_count = 1;
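	/*
	 * Small bos (fewer pages than a single page-table page holds PTEs,
	 * e.g. 512 with 4 KiB pages and 8-byte PTEs) start out with the
	 * cheap pagetable-scan method; larger ones start out fully
	 * write-protected and tracked via mkwrite.
	 */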
	if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
		dirty->method = VMW_BO_DIRTY_PAGETABLE;
	} else {
		struct address_space *mapping = vbo->base.bdev->dev_mapping;
		pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);

		dirty->method = VMW_BO_DIRTY_MKWRITE;

		/* Write-protect and then pick up already dirty bits */
		wp_shared_mapping_range(mapping, offset, num_pages);
		clean_record_shared_mapping_range(mapping, offset, num_pages,
						  offset,
						  &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}

	vbo->dirty = dirty;

	return 0;

out_no_dirty:
	ttm_mem_global_free(&ttm_mem_glob, acc_size);
	return ret;
}
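
/*
 * Illustrative pairing only (hypothetical caller; the real call sites live
 * elsewhere in the driver): a dirty-tracking user takes a reference with
 * vmw_bo_dirty_add() and drops it again with vmw_bo_dirty_release():
 *
 *	ret = vmw_bo_dirty_add(vbo);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_bo_dirty_release(vbo);
 */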

/**
 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
 * @vbo: The buffer object
 *
 * This function releases a dirty-tracking user from a buffer object.
 * If the reference count reaches zero, then the dirty-tracking object is
 * freed and the pointer to it cleared.
 */
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty && --dirty->ref_count == 0) {
		size_t acc_size = dirty->size;

		kvfree(dirty);
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		vbo->dirty = NULL;
	}
}

/**
 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will pick up all dirty ranges affecting the resource from
 * its backup mob, and call vmw_resource_dirty_update() once for each
 * range. The transferred ranges will be cleared from the backing mob's
 * dirty tracking.
 */
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;

	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	cur = max(res_start, dirty->start);
	res_end = max(res_end, dirty->end);
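	/*
	 * Walk the bitmap in runs of consecutive set bits; clear each run
	 * and report it to the resource as a [start, end) page range.
	 */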
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;

		end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
		cur = end + 1;
		num = end - start;
		bitmap_clear(&dirty->bitmap[0], start, num);
		vmw_resource_dirty_update(res, start, end);
	}

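	/*
	 * If the resource range covered either end of the recorded dirty
	 * span, trim the span accordingly.
	 */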
	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

/**
 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will clear all dirty ranges affecting the resource from
 * its backup mob's dirty tracking.
 */
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;

	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	res_start = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	vm_fault_t ret;
	unsigned long page_offset;
	unsigned int save_flags;
	struct vmw_buffer_object *vbo =
		container_of(bo, typeof(*vbo), base);

	/*
	 * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
	 * So make sure the TTM helpers are aware.
	 */
	save_flags = vmf->flags;
	vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
	ret = ttm_bo_vm_reserve(bo, vmf);
	vmf->flags = save_flags;
	if (ret)
		return ret;

	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

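	/*
	 * With the mkwrite method, the first write to a clean page lands
	 * here: record the page in the bitmap and widen the tracked
	 * [start, end) span to include it.
	 */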
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
	    !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
		struct vmw_bo_dirty *dirty = vbo->dirty;

		__set_bit(page_offset, &dirty->bitmap[0]);
		dirty->start = min(dirty->start, page_offset);
		dirty->end = max(dirty->end, page_offset + 1);
	}

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	pgoff_t num_prefault;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
		TTM_BO_VM_NUM_PREFAULT;

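	/*
	 * When dirty tracking is active, refuse faults outside the bo and
	 * faults for which the overlapping resources cannot be cleaned,
	 * and clamp prefaulting to the range vmw_resources_clean() reports
	 * as safe.
	 */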
	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= bo->num_pages ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		num_prefault = min(num_prefault, allowed_prefault);
	}

	/*
	 * If we don't track dirty using the MKWRITE method, make sure
	 * the page protection is write-enabled so we don't get
	 * a lot of unnecessary write faults.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
		prot = vma->vm_page_prot;
	else
		prot = vm_get_page_prot(vma->vm_flags);

	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}
    494