/*	$NetBSD: uvm_readahead.c,v 1.1.2.13 2005/11/20 05:00:38 yamt Exp $	*/

/*-
 * Copyright (c)2003, 2005 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.1.2.13 2005/11/20 05:00:38 yamt Exp $");

#include <sys/param.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(READAHEAD_DEBUG)
#define	DPRINTF(a)	printf a
#else /* defined(READAHEAD_DEBUG) */
#define	DPRINTF(a)	/* nothing */
#endif /* defined(READAHEAD_DEBUG) */

/*
 * uvm_ractx: read-ahead context.
 */

struct uvm_ractx {
	int ra_flags;
#define	RA_VALID	1
	off_t ra_winstart;	/* window start offset */
	size_t ra_winsize;	/* window size */
	off_t ra_next;		/* next offset to read-ahead */
};
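
/*
 * The context describes a per-stream sliding window:
 * [ra_winstart, ra_winstart + ra_winsize) is the region we expect the
 * reader to touch next, and offsets below ra_next have already been
 * issued for read-ahead and need not be requested again.
 */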

/*
 * XXX tune
 * XXX should consider the amount of memory in the system.
 * XXX should consider the speed of the underlying device.
 */

#define	RA_WINSIZE_INIT	MAXPHYS			/* initial window size */
#define	RA_WINSIZE_MAX	(MAXPHYS * 8)		/* max window size */
#define	RA_WINSIZE_SEQUENTIAL	RA_WINSIZE_MAX	/* fixed window size used for
						   SEQUENTIAL hint */
#define	RA_MINSIZE	(MAXPHYS * 2)		/* min size to start i/o */
#define	RA_IOCHUNK	MAXPHYS			/* read-ahead i/o chunk size */
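
/*
 * Concrete numbers (note: MAXPHYS is machine-dependent; the figures below
 * assume the common value of 64KB): the window starts out 64KB wide, can
 * grow up to 512KB, read-ahead i/o is only started once at least 128KB is
 * pending, and each i/o is issued in 64KB chunks.
 */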

static off_t ra_startio(struct uvm_object *, off_t, size_t);
static struct uvm_ractx *ra_allocctx(void);
static void ra_freectx(struct uvm_ractx *);

POOL_INIT(ractx_pool, sizeof(struct uvm_ractx), 0, 0, 0, "ractx",
    &pool_allocator_nointr);

static struct uvm_ractx *
ra_allocctx(void)
{

	return pool_get(&ractx_pool, PR_NOWAIT);
}

static void
ra_freectx(struct uvm_ractx *ra)
{

	pool_put(&ractx_pool, ra);
}

/*
 * ra_startio: start i/o for read-ahead.
 *
 * => start i/o for each RA_IOCHUNK sized chunk.
 * => return offset to which we started i/o.
 */

static off_t
ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
{
	const off_t endoff = off + sz;

	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
	    __func__, uobj, off, endoff));
	off = trunc_page(off);
	while (off < endoff) {
		const size_t chunksize = RA_IOCHUNK;
		int error;
		size_t donebytes;
		int npages;
		int orignpages;
		size_t bytelen;

		KASSERT((chunksize & (chunksize - 1)) == 0);
		KASSERT((off & PAGE_MASK) == 0);
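		/*
		 * read up to the next RA_IOCHUNK boundary so that each i/o
		 * stays chunk-aligned.  e.g. with a 64KB chunk size and off
		 * at 80KB, bytelen below comes out as 48KB.
		 */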
		bytelen = ((off + chunksize) & -(off_t)chunksize) - off;
		KASSERT((bytelen & PAGE_MASK) == 0);
		npages = orignpages = bytelen >> PAGE_SHIFT;
		KASSERT(npages != 0);

		/*
		 * use UVM_ADV_RANDOM to avoid recursion,
		 * i.e. read-ahead triggering further read-ahead.
		 */

		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, 0);
		DPRINTF(("%s:  off=%" PRIu64 ", bytelen=%zu -> %d\n",
		    __func__, off, bytelen, error));
		if (error != 0 && error != EBUSY) {
			if (error != EINVAL) { /* maybe past EOF */
				DPRINTF(("%s: error=%d\n", __func__, error));
			}
			break;
		}
		KASSERT(orignpages == npages);
		donebytes = orignpages << PAGE_SHIFT;
		off += donebytes;
	}

	return off;
}

/* ------------------------------------------------------------ */

struct uvm_ractx *
uvm_ra_allocctx(void)
{
	struct uvm_ractx *ra;

	ra = ra_allocctx();
	if (ra != NULL) {
		ra->ra_flags = 0;
	}

	return ra;
}

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	KASSERT(ra != NULL);
	ra_freectx(ra);
}

/*
 * uvm_ra_request: start i/o for read-ahead if appropriate.
 *
 * => called by filesystems when [reqoff, reqoff+reqsize) is requested.
 */
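
/*
 * Typical calling sequence (a minimal sketch; the surrounding read-path
 * code is hypothetical and only illustrates the calling convention):
 *
 *	struct uvm_ractx *ra = uvm_ra_allocctx();	(at open time)
 *	...
 *	(before servicing each read of [off, off + len))
 *	uvm_ra_request(ra, UVM_ADV_NORMAL, uobj, off, len);
 *	...
 *	uvm_ra_freectx(ra);				(at close time)
 *
 * uvm_ra_allocctx() may return NULL under memory shortage; that is fine,
 * as uvm_ra_request() treats ra == NULL as "no read-ahead".
 */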

void
uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj,
    off_t reqoff, size_t reqsize)
{

	if (ra == NULL || advice == UVM_ADV_RANDOM) {
		return;
	}

	/*
	 * XXX needs locking?  maybe.
	 * but the worst effect is merely a bad read-ahead.
	 */

	if (advice == UVM_ADV_SEQUENTIAL) {

		/*
		 * always do read-ahead with a large window.
		 */

		if ((ra->ra_flags & RA_VALID) == 0) {
			ra->ra_winstart = ra->ra_next = 0;
			ra->ra_flags |= RA_VALID;
		}
		if (reqoff < ra->ra_winstart) {
			ra->ra_next = reqoff;
		}
		ra->ra_winsize = RA_WINSIZE_SEQUENTIAL;
		goto do_readahead;
	}

	/*
	 * a request with UVM_ADV_NORMAL hint.  (i.e. no hint)
	 *
	 * we keep a sliding window in order to determine:
	 *	- if the previous read-ahead was successful or not.
	 *	- how many bytes to read-ahead.
	 */
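	/*
	 * for example, with 16KB read(2) calls (and assuming 64KB MAXPHYS),
	 * the window starts out 64KB wide and grows by 16KB on each hit,
	 * saturating at RA_WINSIZE_MAX (512KB).
	 */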

	/*
	 * if it's the first request for this context,
	 * initialize context and return.
	 */

	if ((ra->ra_flags & RA_VALID) == 0) {
initialize:
		ra->ra_winstart = ra->ra_next = reqoff + reqsize;
		ra->ra_winsize = RA_WINSIZE_INIT;
		ra->ra_flags |= RA_VALID;
		goto done;
	}

	/*
	 * if it isn't in our window,
	 * initialize context and return.
	 * (read-ahead miss)
	 */

	if (reqoff < ra->ra_winstart ||
	    ra->ra_winstart + ra->ra_winsize < reqoff) {

		/*
		 * ... unless we seem to be reading the same chunk repeatedly.
		 */

		if (reqoff + reqsize == ra->ra_winstart) {
			DPRINTF(("%s: %p: same block: off=%" PRIu64
			    ", size=%zd, winstart=%" PRIu64 "\n",
			    __func__, ra, reqoff, reqsize, ra->ra_winstart));
			goto done;
		}
		goto initialize;
	}

	/*
	 * it's in our window. (read-ahead hit)
	 *	- start read-ahead i/o if appropriate.
	 *	- advance and enlarge window.
	 */

do_readahead:

	/*
	 * don't bother to read-ahead behind current request.
	 */

	if (reqoff > ra->ra_next) {
		ra->ra_next = reqoff;
	}
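
	/*
	 * from here on reqoff <= ra_next holds, so the read-ahead size
	 * computed below can never exceed ra_winsize (and thus
	 * RA_WINSIZE_MAX) unless the context has been corrupted.
	 */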

	/*
	 * try to make [reqoff, reqoff+ra_winsize) in-core.
	 * note that [reqoff, ra_next) is considered already done.
	 */

	if (reqoff + ra->ra_winsize > ra->ra_next) {
		off_t raoff = MAX(reqoff, ra->ra_next);
		size_t rasize = reqoff + ra->ra_winsize - ra->ra_next;

#if defined(DIAGNOSTIC)
		if (rasize > RA_WINSIZE_MAX) {

			/*
			 * shouldn't happen as long as we're protected by
			 * kernel_lock.
			 */

			printf("%s: corrupted context\n", __func__);
			rasize = RA_WINSIZE_MAX;
		}
#endif /* defined(DIAGNOSTIC) */

		/*
		 * issue read-ahead only if we can start big enough i/o.
		 * otherwise we end up with a stream of small i/o.
		 */

		if (rasize >= RA_MINSIZE) {
			ra->ra_next = ra_startio(uobj, raoff, rasize);
		}
	}

	/*
	 * update window.
	 *
	 * enlarge window by reqsize, so that it grows in a predictable manner
	 * regardless of the size of each read(2).
	 */

	ra->ra_winstart = reqoff + reqsize;
	ra->ra_winsize = MIN(RA_WINSIZE_MAX, ra->ra_winsize + reqsize);

done:;
}