/*	$NetBSD: uvm_page_array.c,v 1.1.2.3 2011/11/26 15:19:06 yamt Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.1.2.3 2011/11/26 15:19:06 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar)
{

	ar->ar_idx = ar->ar_npages = 0;
}

/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison to trigger assertion in uvm_page_array_peek to
	 * detect usage errors.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}

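/*
 * example (an illustrative sketch, not code from a real caller): with
 * DIAGNOSTIC, the poison above makes ar_idx (1000) exceed ar_npages (1),
 * so any use of the array after uvm_page_array_fini trips the KASSERT
 * in uvm_page_array_peek.
 */
#if 0 /* example only */
	uvm_page_array_fini(&a);
	(void)uvm_page_array_peek(&a);	/* panics: "ar->ar_idx <= ar->ar_npages" */
#endif
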
/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	uvm_page_array_init(ar);
}

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}

/*
 * uvm_page_array_fill: look up pages and keep them cached.
 *
 * return 0 on success.  in that case, the results are cached in the array
 * so that they will be picked up by later uvm_page_array_peek.
 *
 * nwant is the number of pages to fetch.  a caller should consider it
 * a hint.  nwant == 0 means the caller has no specific preference.
 *
 * the flags:
 * UVM_PAGE_ARRAY_FILL_DIRTYONLY: look up only pages tagged dirty.
 * UVM_PAGE_ARRAY_FILL_BACKWARD: scan toward lower offsets.
 * UVM_PAGE_ARRAY_FILL_DENSE: stop the lookup at the first hole, so
 * that only a contiguous run of pages is returned.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
    voff_t off, unsigned int nwant, unsigned int flags)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;

	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
	KASSERT(mutex_owned(uobj->vmobjlock));
	KASSERT(uvm_page_array_peek(ar) == NULL);
	/*
	 * gang-lookup the pages from the object's radix tree, optionally
	 * restricted to pages with the dirty tag and/or scanning toward
	 * lower offsets.
	 */
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTYONLY) != 0) {
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, UVM_PAGE_DIRTY_TAG);
	} else {
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		uvm_page_array_clear(ar);
		return ENOENT;
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		KASSERT(pg != NULL);
		KASSERT(pg->uobject == uobj);
		/*
		 * the results of a backward lookup are in descending
		 * offset order, so check the direction accordingly.
		 */
		if (backward) {
			KASSERT(pg->offset <= off);
			KASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KASSERT(pg->offset >= off);
			KASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return 0;
}

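/*
 * example (an illustrative sketch, not code from a real caller): a
 * one-shot batch lookup of up to 16 dirty pages at or after "off".
 * the function name and what is done with each page are assumptions
 * made up for this example.
 */
#if 0 /* example only */
static void
example_fill_dirty(struct uvm_object *uobj, voff_t off)
{
	struct uvm_page_array a;
	struct vm_page *pg;
	int error;

	KASSERT(mutex_owned(uobj->vmobjlock));
	uvm_page_array_init(&a);
	error = uvm_page_array_fill(&a, uobj, off, 16,
	    UVM_PAGE_ARRAY_FILL_DIRTYONLY);
	if (error == 0) {
		/* consume the cached results in ascending offset order */
		while ((pg = uvm_page_array_peek(&a)) != NULL) {
			/* ... process the dirty page "pg" ... */
			uvm_page_array_advance(&a);
		}
	}
	uvm_page_array_fini(&a);
}
#endif
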
/*
 * uvm_page_array_fill_and_peek:
 * the same as uvm_page_array_peek, except that if the array is empty,
 * try to fill it first.
 */

struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj,
    voff_t off, unsigned int nwant, unsigned int flags)
{
	struct vm_page *pg;
	int error;

	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		return pg;
	}
	error = uvm_page_array_fill(a, uobj, off, nwant, flags);
	if (error != 0) {
		return NULL;
	}
	pg = uvm_page_array_peek(a);
	KASSERT(pg != NULL);
	return pg;
}
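
/*
 * example (an illustrative sketch, not code from a real caller): iterate
 * over all pages of an object in ascending offset order, refilling the
 * array as it drains.  the function name, the locking discipline, and
 * the per-page processing are assumptions made up for this example.
 */
#if 0 /* example only */
static void
example_scan_object(struct uvm_object *uobj)
{
	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t off = 0;

	uvm_page_array_init(&a);
	mutex_enter(uobj->vmobjlock);
	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, 0)) !=
	    NULL) {
		/* ... process "pg" ... */
		off = pg->offset + PAGE_SIZE;	/* resume after this page */
		uvm_page_array_advance(&a);
	}
	mutex_exit(uobj->vmobjlock);
	uvm_page_array_fini(&a);
}
#endif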