/*	$NetBSD: uvm_readahead.c,v 1.8.12.1 2012/09/12 06:15:36 tls Exp $	*/

/*-
 * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * uvm_object read-ahead
 *
 * TODO:
 *	- tune.
 *	- handle multiple streams.
 *	- find a better way to deal with PGO_LOCKED pager requests.
 *	  (currently just ignored)
 *	- consider the amount of memory in the system.
 *	- consider the speed of the underlying device.
 *	- consider filesystem block size / block layout.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.8.12.1 2012/09/12 06:15:36 tls Exp $");

#include <sys/param.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(READAHEAD_DEBUG)
#define	DPRINTF(a)	printf a
#else /* defined(READAHEAD_DEBUG) */
#define	DPRINTF(a)	/* nothing */
#endif /* defined(READAHEAD_DEBUG) */
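
/*
 * Note: READAHEAD_DEBUG is never defined in this file; it is expected
 * to be supplied by the build (e.g. a -DREADAHEAD_DEBUG compile flag).
 */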

#if defined(sun2) || defined(sun3)
/*
 * XXX: on sun2 and sun3 MAXPHYS is 0xe000, which is not a power of two.
 * the chunk arithmetic in ra_startio() requires a power-of-two chunk
 * size, so clamp it to 0x8000.
 */
#undef MAXPHYS
#define MAXPHYS		0x8000	/* XXX */
#endif

static off_t ra_startio(struct uvm_object *, off_t, size_t, size_t);
static struct uvm_ractx *ra_allocctx(void);
static void ra_freectx(struct uvm_ractx *);

static struct pool_cache ractx_cache;

/*
 * uvm_ra_init: initialize readahead module.
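 * expected to be called once during bootstrap (in NetBSD, from
 * uvm_init()).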
 */

void
uvm_ra_init(void)
{

	pool_cache_bootstrap(&ractx_cache, sizeof(struct uvm_ractx), 0, 0, 0,
	    "ractx", NULL, IPL_NONE, NULL, NULL, NULL);
}

static struct uvm_ractx *
ra_allocctx(void)
{

	return pool_cache_get(&ractx_cache, PR_NOWAIT);
}

static void
ra_freectx(struct uvm_ractx *ra)
{

	pool_cache_put(&ractx_cache, ra);
}

/*
 * ra_startio: start i/o for read-ahead.
 *
 * => start i/o in chunks of at most "chunksz" bytes each.
 * => return the offset up to which i/o has been started.
 *
 * => if the layer above has asked for less than one chunk, assume
 *    it knew best and do not inflate the request to a full chunk.
 */

static off_t
ra_startio(struct uvm_object *uobj, off_t off, size_t sz, size_t chunksz)
{
	const off_t endoff = off + sz;

	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
	    __func__, uobj, off, endoff));
	off = trunc_page(off);
	while (off < endoff) {
		const size_t chunksize = MIN(chunksz, round_page(sz));
		int error;
		size_t donebytes;
		int npages;
		int orignpages;
		size_t bytelen;

		if ((chunksize & (chunksize - 1)) != 0) {
			panic("bad chunksize %zu, iochunk %zu, request size %zu",
			    chunksize, chunksz, sz);
		}
		KASSERT((off & PAGE_MASK) == 0);
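		/*
		 * round "off" up to the next chunksize boundary: since
		 * chunksize is a power of two, -(off_t)chunksize is a mask
		 * of the high bits, so (off + chunksize) & -chunksize is
		 * the lowest multiple of chunksize strictly greater than
		 * off (or off + chunksize when off is already aligned).
		 * e.g. off = 0x3000, chunksize = 0x10000 -> bytelen = 0xd000.
		 */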
		bytelen = ((off + chunksize) & -(off_t)chunksize) - off;
		if ((bytelen & PAGE_MASK) != 0) {
			panic("bad bytelen %zu with off %" PRIu64
			    ", chunksize %zu (iochunk %zu, sz %zu)",
			    bytelen, off, chunksize, chunksz, sz);
		}
		npages = orignpages = bytelen >> PAGE_SHIFT;
		KASSERT(npages != 0);


		/*
		 * use UVM_ADV_RANDOM to avoid recursion:
		 * the pager's pgo_get can trigger read-ahead itself,
		 * and UVM_ADV_RANDOM makes that a no-op.
		 */

		mutex_enter(uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, 0);
		DPRINTF(("%s:  off=%" PRIu64 ", bytelen=%zu -> %d\n",
		    __func__, off, bytelen, error));
		if (error != 0 && error != EBUSY) {
			if (error != EINVAL) { /* maybe past EOF */
				DPRINTF(("%s: error=%d\n", __func__, error));
			}
			break;
		}
		KASSERT(orignpages == npages);
		donebytes = orignpages << PAGE_SHIFT;
		off += donebytes;
	}

	return off;
}

/* ------------------------------------------------------------ */

/*
 * uvm_ra_allocctx: allocate a context.
 */

struct uvm_ractx *
uvm_ra_allocctx(void)
{
	struct uvm_ractx *ra;

	ra = ra_allocctx();
	if (ra != NULL) {
		ra->ra_flags = 0;
		ra->ra_iochunk = MAXPHYS;
	}

	return ra;
}

/*
 * uvm_ra_freectx: free a context.
 */

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	KASSERT(ra != NULL);
	ra_freectx(ra);
}

/*
 * uvm_ra_request: update a read-ahead context and start i/o if appropriate.
 *
 * => called when [reqoff, reqoff+reqsize) is requested.
 * => object must be locked by the caller; it is returned locked.
 */

void
uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj,
    off_t reqoff, size_t reqsize)
{

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (ra == NULL || advice == UVM_ADV_RANDOM) {
		return;
	}

	if (advice == UVM_ADV_SEQUENTIAL) {

		/*
		 * always do read-ahead with a large window.
		 */

		if ((ra->ra_flags & UVM_RA_VALID) == 0) {
			ra->ra_winstart = ra->ra_next = 0;
			ra->ra_flags |= UVM_RA_VALID;
		}
		if (reqoff < ra->ra_winstart) {
			ra->ra_next = reqoff;
		}
		ra->ra_winsize = UVM_RA_WINSIZE_SEQUENTIAL;
		goto do_readahead;
	}

	/*
	 * a request with the UVM_ADV_NORMAL hint (i.e. no hint).
	 *
	 * we keep a sliding window in order to determine:
	 *	- whether the previous read-ahead was successful.
	 *	- how many bytes to read ahead.
	 */
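
	/*
	 * illustration (hypothetical numbers): after a first 64KB read at
	 * offset 0, the window starts at 64KB with size UVM_RA_WINSIZE_INIT.
	 * a subsequent read inside the window (a hit) advances the window
	 * and grows it by the request size, up to UVM_RA_WINSIZE_MAX;
	 * a read outside the window (a miss) re-initializes the context,
	 * as in the first-request case below.
	 */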

	/*
	 * if it's the first request for this context,
	 * initialize the context and return.
	 */

	if ((ra->ra_flags & UVM_RA_VALID) == 0) {
initialize:
		ra->ra_winstart = ra->ra_next = reqoff + reqsize;
		ra->ra_winsize = UVM_RA_WINSIZE_INIT;
		ra->ra_flags |= UVM_RA_VALID;
		goto done;
	}

	/*
	 * if it isn't in our window,
	 * re-initialize the context and return.
	 * (read-ahead miss)
	 */

	if (reqoff < ra->ra_winstart ||
	    ra->ra_winstart + ra->ra_winsize < reqoff) {

		/*
		 * ... unless we seem to be reading the same chunk repeatedly.
		 *
		 * XXX should have some margin?
		 */

		if (reqoff + reqsize == ra->ra_winstart) {
			DPRINTF(("%s: %p: same block: off=%" PRIu64
			    ", size=%zu, winstart=%" PRIu64 "\n",
			    __func__, ra, reqoff, reqsize, ra->ra_winstart));
			goto done;
		}
		goto initialize;
	}

	/*
	 * it's in our window. (read-ahead hit)
	 *	- start read-ahead i/o if appropriate.
	 *	- advance and enlarge the window.
	 */

do_readahead:

	/*
	 * don't bother to read ahead behind the current request.
	 */

	if (reqoff > ra->ra_next) {
		ra->ra_next = reqoff;
	}

	/*
	 * try to make [reqoff, reqoff+ra_winsize) in-core.
	 * note that [reqoff, ra_next) is considered already done.
	 */

	if (reqoff + ra->ra_winsize > ra->ra_next) {
		off_t raoff = MAX(reqoff, ra->ra_next);
		size_t rasize = reqoff + ra->ra_winsize - ra->ra_next;

#if defined(DIAGNOSTIC)
		if (rasize > UVM_RA_WINSIZE_MAX) {
			printf("%s: corrupted context\n", __func__);
			rasize = UVM_RA_WINSIZE_MAX;
		}
#endif /* defined(DIAGNOSTIC) */

		/*
		 * issue read-ahead only if we can start a big enough i/o.
		 * otherwise we end up with a stream of small i/os.
		 */

		if (rasize >= UVM_RA_MINSIZE) {
			off_t next;

			mutex_exit(uobj->vmobjlock);
			next = ra_startio(uobj, raoff, rasize, ra->ra_iochunk);
			mutex_enter(uobj->vmobjlock);
			ra->ra_next = next;
		}
	}

	/*
	 * update the window.
	 *
	 * enlarge the window by reqsize so that it grows in a predictable
	 * manner regardless of the size of each read(2).
	 */

	ra->ra_winstart = reqoff + reqsize;
	ra->ra_winsize = MIN(UVM_RA_WINSIZE_MAX, ra->ra_winsize + reqsize);

done:;
}
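
/*
 * a sketch of how uvm_ra_request() is typically driven (illustrative,
 * not a verbatim caller; in NetBSD the vnode pager's pgo_get does
 * something similar with the advice handed down from the read/fault
 * path):
 *
 *	mutex_enter(uobj->vmobjlock);
 *	uvm_ra_request(ractx, advice, uobj, offset, bytes);
 *	error = (*uobj->pgops->pgo_get)(uobj, offset, pages, &npages,
 *	    centeridx, access_type, advice, flags);
 *	(the object lock is then handled per pgo_get's own protocol.)
 */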
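/*
 * uvm_readahead: explicit read-ahead entry point, bypassing the context
 * window logic; in NetBSD this is reached from posix_fadvise(2)'s
 * POSIX_FADV_WILLNEED handling (an observation about the usual caller,
 * not something this file enforces).
 *
 * => unlike uvm_ra_request(), the object must be unlocked by the caller;
 *    ra_startio() takes and releases the object lock itself.
 */
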
int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size,
	      struct uvm_ractx *ra)
{

	/*
	 * don't allow too much read-ahead.
	 */
	if (size > UVM_RA_WINSIZE_MAX) {
		size = UVM_RA_WINSIZE_MAX;
	}
	ra_startio(uobj, off, size, ra->ra_iochunk);
	return 0;
}