/*	$NetBSD: uvm_page_array.c,v 1.6 2020/05/25 21:15:10 ad Exp $	*/

/*-
 * Copyright (c)2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.6 2020/05/25 21:15:10 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_page_array.h>

/*
 * uvm_page_array_init: initialize the array.
 */

void
uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
    unsigned int flags)
{

	ar->ar_idx = 0;
	ar->ar_npages = 0;
	ar->ar_uobj = uobj;
	ar->ar_flags = flags;
}

/*
 * uvm_page_array_fini: clean up the array.
 */

void
uvm_page_array_fini(struct uvm_page_array *ar)
{

	/*
	 * currently nothing to do.
	 */
#if defined(DIAGNOSTIC)
	/*
	 * poison the array to trigger an assertion in uvm_page_array_peek
	 * and catch use-after-fini errors.
	 */
	ar->ar_npages = 1;
	ar->ar_idx = 1000;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * uvm_page_array_clear: forget the cached pages and initialize the array.
 */

void
uvm_page_array_clear(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx = 0;
	ar->ar_npages = 0;
}
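
/*
 * note that the cached entries are only a snapshot of the tree taken
 * under the object lock.  a caller that drops and reacquires that lock
 * would typically clear the array before relying on it again, since the
 * cached pages may no longer be valid.
 */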

/*
 * uvm_page_array_peek: return the next cached page.
 */

struct vm_page *
uvm_page_array_peek(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	if (ar->ar_idx == ar->ar_npages) {
		return NULL;
	}
	return ar->ar_pages[ar->ar_idx];
}

/*
 * uvm_page_array_advance: advance the array to the next cached page.
 */

void
uvm_page_array_advance(struct uvm_page_array *ar)
{

	KASSERT(ar->ar_idx <= ar->ar_npages);
	ar->ar_idx++;
	KASSERT(ar->ar_idx <= ar->ar_npages);
}
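
/*
 * together, uvm_page_array_peek and uvm_page_array_advance form the basic
 * iterator protocol over a cached batch: peek inspects the current entry
 * without consuming it, advance moves on to the next one.  a minimal
 * sketch (an illustration, not code from this file) of draining an array
 * "a" that has already been filled in a mode that caches no NULL entries:
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_page_array_peek(&a)) != NULL) {
 *		... use pg; peek alone does not consume the entry ...
 *		uvm_page_array_advance(&a);
 *	}
 */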

/*
 * uvm_page_array_fill: lookup pages and keep them cached.
 *
 * return 0 on success.  in that case, the results are cached in the array
 * so that they will be picked up by later uvm_page_array_peek calls.
 *
 * nwant is the number of pages to fetch.  a caller should consider it
 * a hint; nwant == 0 means the caller has no particular number in mind.
 *
 * return ENOENT if no pages are found.
 *
 * called with the object lock held.
 */

int
uvm_page_array_fill(struct uvm_page_array *ar, voff_t off, unsigned int nwant)
{
	unsigned int npages;
#if defined(DEBUG)
	unsigned int i;
#endif /* defined(DEBUG) */
	unsigned int maxpages = __arraycount(ar->ar_pages);
	struct uvm_object *uobj = ar->ar_uobj;
	const int flags = ar->ar_flags;
	const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
	const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;

	if (nwant != 0 && nwant < maxpages) {
		maxpages = nwant;
	}
#if 0 /* called from DDB for "show obj/f" without lock */
	KASSERT(rw_lock_held(uobj->vmobjlock));
#endif
	KASSERT(uvm_page_array_peek(ar) == NULL);
	if ((flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0) {
		unsigned int tagmask = UVM_PAGE_DIRTY_TAG;

		if ((flags & UVM_PAGE_ARRAY_FILL_WRITEBACK) != 0) {
			tagmask |= UVM_PAGE_WRITEBACK_TAG;
		}
		npages =
		    (backward ? radix_tree_gang_lookup_tagged_node_reverse :
		    radix_tree_gang_lookup_tagged_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense, tagmask);
	} else {
		npages =
		    (backward ? radix_tree_gang_lookup_node_reverse :
		    radix_tree_gang_lookup_node)(
		    &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
		    maxpages, dense);
	}
	if (npages == 0) {
		if (flags != 0) {
			/*
			 * if dense or looking for tagged entries (or
			 * working backwards), fail right away.
			 */
			uvm_page_array_clear(ar);
			return ENOENT;
		} else {
			/*
			 * there's nothing else to be found with the current
			 * set of arguments, in the current version of the
			 * tree.
			 *
			 * minimize repeated tree lookups by "finding" some
			 * null pointers, in case the caller keeps looping
			 * (a common use case).
			 */
			npages = maxpages;
			memset(ar->ar_pages, 0, sizeof(ar->ar_pages[0]) * npages);
		}
	}
	KASSERT(npages <= maxpages);
	ar->ar_npages = npages;
	ar->ar_idx = 0;
#if defined(DEBUG)
	for (i = 0; i < ar->ar_npages; i++) {
		struct vm_page * const pg = ar->ar_pages[i];

		if (!dense && pg == NULL) {
			continue;
		}
		KDASSERT(pg != NULL);
		KDASSERT(pg->uobject == uobj);
		if (backward) {
			KDASSERT(pg->offset <= off);
			KDASSERT(i == 0 ||
			    pg->offset < ar->ar_pages[i - 1]->offset);
		} else {
			KDASSERT(pg->offset >= off);
			KDASSERT(i == 0 ||
			    pg->offset > ar->ar_pages[i - 1]->offset);
		}
	}
#endif /* defined(DEBUG) */
	return 0;
}
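
/*
 * a sketch of driving uvm_page_array_fill directly to visit an object's
 * dirty pages in batches (an illustration, not code from this file; "uobj"
 * is an assumed uvm_object pointer, and page busying, error handling and
 * the required object locking are omitted).  tagged lookups never cache
 * NULL entries, so peek returning NULL below means the batch is drained:
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *	voff_t off = 0;
 *
 *	uvm_page_array_init(&a, uobj, UVM_PAGE_ARRAY_FILL_DIRTY);
 *	while (uvm_page_array_fill(&a, off, 0) == 0) {
 *		while ((pg = uvm_page_array_peek(&a)) != NULL) {
 *			... process pg ...
 *			off = pg->offset + PAGE_SIZE;
 *			uvm_page_array_advance(&a);
 *		}
 *	}
 *	uvm_page_array_fini(&a);
 *
 * with UVM_PAGE_ARRAY_FILL_DIRTY set, fill returns ENOENT once no tagged
 * pages remain, which terminates the outer loop.
 */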

/*
 * uvm_page_array_fill_and_peek:
 * same as uvm_page_array_peek except that, if the array is empty, it tries
 * to fill the array first.
 */

struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *a, voff_t off,
    unsigned int nwant)
{
	struct vm_page *pg;
	int error;

	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		return pg;
	}
	error = uvm_page_array_fill(a, off, nwant);
	if (error != 0) {
		return NULL;
	}
	pg = uvm_page_array_peek(a);
	return pg;
}
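
/*
 * the usual consumer loops on this function with an offset that it
 * advances itself, loosely the way genfs_putpages drives this interface
 * (a sketch only; "uobj" and "startoff" are assumed, and locking and page
 * state checks are omitted):
 *
 *	struct uvm_page_array a;
 *	struct vm_page *pg;
 *	voff_t nextoff = startoff;
 *
 *	uvm_page_array_init(&a, uobj, 0);
 *	while ((pg = uvm_page_array_fill_and_peek(&a, nextoff, 0)) != NULL) {
 *		... process pg ...
 *		nextoff = pg->offset + PAGE_SIZE;
 *		uvm_page_array_advance(&a);
 *	}
 *	uvm_page_array_fini(&a);
 */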