/*	$NetBSD: uvm_readahead.c,v 1.16 2023/09/23 18:21:12 ad Exp $	*/

/*-
 * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * uvm_object read-ahead
 *
 * TODO:
 *	- tune.
 *	- handle multiple streams.
 *	- find a better way to deal with PGO_LOCKED pager requests.
 *	  (currently just ignored)
 *	- consider the amount of memory in the system.
 *	- consider the speed of the underlying device.
 *	- consider filesystem block size / block layout.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.16 2023/09/23 18:21:12 ad Exp $");

#include <sys/param.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(READAHEAD_DEBUG)
#define	DPRINTF(a)	printf a
#else /* defined(READAHEAD_DEBUG) */
#define	DPRINTF(a)	/* nothing */
#endif /* defined(READAHEAD_DEBUG) */

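/*
 * READAHEAD_DEBUG is a compile-time knob; one hypothetical way to enable
 * the DPRINTF tracing above is to build with CPPFLAGS+=-DREADAHEAD_DEBUG.
 */
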
/*
 * uvm_ractx: read-ahead context.
 */

struct uvm_ractx {
	int ra_flags;
#define	RA_VALID	1
	off_t ra_winstart;	/* window start offset */
	size_t ra_winsize;	/* window size */
	off_t ra_next;		/* next offset to read-ahead */
};

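/*
 * The context describes a sliding window [ra_winstart, ra_winstart +
 * ra_winsize).  Requests that fall inside the window count as read-ahead
 * hits; offsets below ra_next are considered already read ahead.
 */
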
#if defined(sun2) || defined(sun3)
/* XXX: on sun2 and sun3 MAXPHYS is 0xe000 */
#undef MAXPHYS
#define MAXPHYS		0x8000	/* XXX */
#endif

#define	RA_WINSIZE_INIT	MAXPHYS			/* initial window size */
#define	RA_WINSIZE_MAX	(MAXPHYS * 16)		/* max window size */
#define	RA_WINSIZE_SEQENTIAL	RA_WINSIZE_MAX	/* fixed window size used for
						   SEQUENTIAL hint */
#define	RA_MINSIZE	(MAXPHYS * 2)		/* min size to start i/o */
#define	RA_IOCHUNK	MAXPHYS			/* read-ahead i/o chunk size */

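/*
 * For example, on a port where MAXPHYS is 64KB: the window starts at
 * 64KB, may grow up to RA_WINSIZE_MAX = 1MB, i/o is issued in 64KB
 * chunks, and no i/o is started until at least RA_MINSIZE = 128KB worth
 * of read-ahead is pending.
 */
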
static off_t ra_startio(struct uvm_object *, off_t, size_t);
static struct uvm_ractx *ra_allocctx(void);
static void ra_freectx(struct uvm_ractx *);

/*
 * uvm_ra_init: initialize readahead module.
 */

void
uvm_ra_init(void)
{

}

static struct uvm_ractx *
ra_allocctx(void)
{

	return kmem_alloc(sizeof(struct uvm_ractx), KM_NOSLEEP);
}

static void
ra_freectx(struct uvm_ractx *ra)
{

	kmem_free(ra, sizeof(struct uvm_ractx));
}

/*
 * ra_startio: start i/o for read-ahead.
 *
 * => start i/o for each RA_IOCHUNK sized chunk.
 * => return offset to which we started i/o.
 */

static off_t
ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
{
	const off_t endoff = off + sz;

	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
	    __func__, uobj, off, endoff));

	KASSERT(rw_write_held(uobj->vmobjlock));

	/*
	 * Don't issue read-ahead if the last page of the range is
	 * already cached.  Since the access is sequential, the
	 * intermediate pages should have similar LRU stats and are
	 * therefore likely to still be cached too.  This speeds up
	 * cached i/o by avoiding the lookups and temporary allocations
	 * that a full pgo_get would do; if the guess is wrong, the
	 * skipped pages are simply fetched on demand later.
	 */
	struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1));
	if (pg != NULL) {
		DPRINTF(("%s:  off=%" PRIu64 ", sz=%zu already cached\n",
		    __func__, off, sz));
		return endoff;
	}

	off = trunc_page(off);
	while (off < endoff) {
		const size_t chunksize = RA_IOCHUNK;
		int error;
		size_t donebytes;
		int npages;
		int orignpages;
		size_t bytelen;

		KASSERT((chunksize & (chunksize - 1)) == 0);
		KASSERT((off & PAGE_MASK) == 0);
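		/*
		 * read up to the next chunksize boundary so that each
		 * i/o stays within one chunk; e.g. with chunksize
		 * 0x10000 and off 0x1a000, bytelen is
		 * 0x20000 - 0x1a000 = 0x6000.
		 */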
		bytelen = ((off + chunksize) & -(off_t)chunksize) - off;
		KASSERT((bytelen & PAGE_MASK) == 0);
		npages = orignpages = bytelen >> PAGE_SHIFT;
		KASSERT(npages != 0);

		/*
		 * use UVM_ADV_RANDOM to avoid recursion; with a normal
		 * or sequential hint the pager could call back into
		 * read-ahead.
		 */

		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, PGO_NOTIMESTAMP);
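		/*
		 * pgo_get releases the object lock on return;
		 * re-acquire it before the next iteration.
		 */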
		rw_enter(uobj->vmobjlock, RW_WRITER);
		DPRINTF(("%s:  off=%" PRIu64 ", bytelen=%zu -> %d\n",
		    __func__, off, bytelen, error));
		if (error != 0 && error != EBUSY) {
			if (error != EINVAL) { /* maybe past EOF */
				DPRINTF(("%s: error=%d\n", __func__, error));
			}
			break;
		}
		KASSERT(orignpages == npages);
		donebytes = orignpages << PAGE_SHIFT;
		off += donebytes;
	}

	return off;
}

/* ------------------------------------------------------------ */

/*
 * uvm_ra_allocctx: allocate a context.  the allocation doesn't sleep,
 * so this can fail and return NULL.
 */

struct uvm_ractx *
uvm_ra_allocctx(void)
{
	struct uvm_ractx *ra;

	ra = ra_allocctx();
	if (ra != NULL) {
		ra->ra_flags = 0;
	}

	return ra;
}

/*
 * uvm_ra_freectx: free a context.
 */

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	KASSERT(ra != NULL);
	ra_freectx(ra);
}

/*
 * uvm_ra_request: update a read-ahead context and start i/o if appropriate.
 *
 * => called when [reqoff, reqoff + reqsize) is requested.
 * => the object must be locked by the caller and is returned locked.
 */
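
/*
 * Sketch of typical use (a hypothetical caller; names are illustrative
 * and real callers differ in detail):
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	uvm_ra_request(ra, advice, uobj, off, len);
 *	... read [off, off + len) with the object still locked ...
 */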

void
uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj,
    off_t reqoff, size_t reqsize)
{

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (ra == NULL || advice == UVM_ADV_RANDOM) {
		return;
	}

	if (advice == UVM_ADV_SEQUENTIAL) {

		/*
		 * always do read-ahead with a large window.
		 */

		if ((ra->ra_flags & RA_VALID) == 0) {
			ra->ra_winstart = ra->ra_next = 0;
			ra->ra_flags |= RA_VALID;
		}
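		/*
		 * if the request is behind the window (e.g. the file
		 * was rewound), restart read-ahead from the request.
		 */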
		if (reqoff < ra->ra_winstart) {
			ra->ra_next = reqoff;
		}
		ra->ra_winsize = RA_WINSIZE_SEQENTIAL;
		goto do_readahead;
	}

	/*
	 * a request with the UVM_ADV_NORMAL hint (i.e. no hint).
	 *
	 * we keep a sliding window in order to determine:
	 *	- whether the previous read-ahead was successful.
	 *	- how many bytes to read ahead.
	 */

	/*
	 * if it's the first request for this context,
	 * initialize the context and return.
	 */

	if ((ra->ra_flags & RA_VALID) == 0) {
initialize:
		ra->ra_winstart = ra->ra_next = reqoff + reqsize;
		ra->ra_winsize = RA_WINSIZE_INIT;
		ra->ra_flags |= RA_VALID;
		goto done;
	}

	/*
	 * if the request isn't in our window,
	 * reinitialize the context and return.
	 * (read-ahead miss)
	 */

	if (reqoff < ra->ra_winstart ||
	    ra->ra_winstart + ra->ra_winsize < reqoff) {

		/*
		 * ... unless we seem to be reading the same chunk repeatedly.
		 *
		 * XXX should have some margin?
		 */

		if (reqoff + reqsize == ra->ra_winstart) {
			DPRINTF(("%s: %p: same block: off=%" PRIu64
			    ", size=%zu, winstart=%" PRIu64 "\n",
			    __func__, ra, reqoff, reqsize, ra->ra_winstart));
			goto done;
		}
		goto initialize;
	}

	/*
	 * it's in our window. (read-ahead hit)
	 *	- start read-ahead i/o if appropriate.
	 *	- advance and enlarge window.
	 */

do_readahead:

	/*
	 * don't bother to read-ahead behind the current request.
	 */

	if (reqoff > ra->ra_next) {
		ra->ra_next = reqoff;
	}

	/*
	 * try to make [reqoff, reqoff + ra_winsize) in-core.
	 * note that [reqoff, ra_next) is considered already done.
	 */

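	/*
	 * since ra_next >= reqoff at this point, raoff below reduces to
	 * ra_next and rasize is the remaining part of the window,
	 * [ra_next, reqoff + ra_winsize).
	 */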
	if (reqoff + ra->ra_winsize > ra->ra_next) {
		off_t raoff = MAX(reqoff, ra->ra_next);
		size_t rasize = reqoff + ra->ra_winsize - ra->ra_next;

#if defined(DIAGNOSTIC)
		if (rasize > RA_WINSIZE_MAX) {
			printf("%s: corrupted context\n", __func__);
			rasize = RA_WINSIZE_MAX;
		}
#endif /* defined(DIAGNOSTIC) */

		/*
		 * issue read-ahead only if we can start a big enough i/o.
		 * otherwise we end up with a stream of small i/o.
		 */

		if (rasize >= RA_MINSIZE) {
			off_t next;

			next = ra_startio(uobj, raoff, rasize);
			ra->ra_next = next;
		}
	}

	/*
	 * update the window.
	 *
	 * enlarge the window by reqsize, so that it grows in a predictable
	 * manner regardless of the size of each read(2).
	 */

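	/*
	 * e.g. a stream of sequential 4KB reads grows the window by 4KB
	 * per request until it reaches RA_WINSIZE_MAX.
	 */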
	ra->ra_winstart = reqoff + reqsize;
	ra->ra_winsize = MIN(RA_WINSIZE_MAX, ra->ra_winsize + reqsize);

done:;
}

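/*
 * uvm_readahead: issue read-ahead for the range [off, off + size),
 * e.g. for an explicit application hint such as posix_fadvise(2)'s
 * POSIX_FADV_WILLNEED.
 *
 * => the object must be unlocked; it's locked and unlocked internally.
 */
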
int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
{

	/*
	 * don't allow too much read-ahead.
	 */
	if (size > RA_WINSIZE_MAX) {
		size = RA_WINSIZE_MAX;
	}
	rw_enter(uobj->vmobjlock, RW_WRITER);
	ra_startio(uobj, off, size);
	rw_exit(uobj->vmobjlock);
	return 0;
}