/*	$NetBSD: uvm_readahead.c,v 1.1.2.14 2005/11/22 07:19:51 yamt Exp $	*/
2 1.1.2.1 yamt
3 1.1.2.1 yamt /*-
4 1.1.2.1 yamt * Copyright (c)2003, 2005 YAMAMOTO Takashi,
5 1.1.2.1 yamt * All rights reserved.
6 1.1.2.1 yamt *
7 1.1.2.1 yamt * Redistribution and use in source and binary forms, with or without
8 1.1.2.1 yamt * modification, are permitted provided that the following conditions
9 1.1.2.1 yamt * are met:
10 1.1.2.1 yamt * 1. Redistributions of source code must retain the above copyright
11 1.1.2.1 yamt * notice, this list of conditions and the following disclaimer.
12 1.1.2.1 yamt * 2. Redistributions in binary form must reproduce the above copyright
13 1.1.2.1 yamt * notice, this list of conditions and the following disclaimer in the
14 1.1.2.1 yamt * documentation and/or other materials provided with the distribution.
15 1.1.2.1 yamt *
16 1.1.2.1 yamt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 1.1.2.1 yamt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 1.1.2.1 yamt * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 1.1.2.1 yamt * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 1.1.2.1 yamt * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 1.1.2.1 yamt * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 1.1.2.1 yamt * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 1.1.2.1 yamt * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 1.1.2.1 yamt * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 1.1.2.1 yamt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 1.1.2.1 yamt * SUCH DAMAGE.
27 1.1.2.1 yamt */
28 1.1.2.1 yamt
29 1.1.2.14 yamt /*
30 1.1.2.14 yamt * uvm_object read-ahead
31 1.1.2.14 yamt *
32 1.1.2.14 yamt * TODO:
33 1.1.2.14 yamt * - tune.
34 1.1.2.14 yamt * - handle multiple streams.
35 1.1.2.14 yamt * - find a better way to deal with PGO_LOCKED pager requests.
36 1.1.2.14 yamt * (currently just ignored)
37 1.1.2.14 yamt * - consider the amount of memory in the system.
38 1.1.2.14 yamt * - consider the speed of the underlying device.
39 1.1.2.14 yamt * - consider filesystem block size / block layout.
40 1.1.2.14 yamt */
41 1.1.2.14 yamt
42 1.1.2.1 yamt #include <sys/cdefs.h>
43 1.1.2.14 yamt __KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.1.2.14 2005/11/22 07:19:51 yamt Exp $");
44 1.1.2.1 yamt
45 1.1.2.1 yamt #include <sys/param.h>
46 1.1.2.1 yamt #include <sys/pool.h>
47 1.1.2.1 yamt
48 1.1.2.1 yamt #include <uvm/uvm.h>
49 1.1.2.1 yamt #include <uvm/uvm_readahead.h>
50 1.1.2.1 yamt
51 1.1.2.6 yamt #if defined(READAHEAD_DEBUG)
52 1.1.2.6 yamt #define DPRINTF(a) printf a
53 1.1.2.6 yamt #else /* defined(READAHEAD_DEBUG) */
54 1.1.2.6 yamt #define DPRINTF(a) /* nothing */
55 1.1.2.6 yamt #endif /* defined(READAHEAD_DEBUG) */
56 1.1.2.6 yamt
57 1.1.2.5 yamt /*
58 1.1.2.5 yamt * uvm_ractx: read-ahead context.
59 1.1.2.5 yamt */
60 1.1.2.5 yamt
/*
 * uvm_ractx: per-stream read-ahead state.
 *
 * tracks a sliding window [ra_winstart, ra_winstart + ra_winsize)
 * used to detect sequential access, plus the next offset at which
 * read-ahead i/o should be issued.
 */

struct uvm_ractx {
	int ra_flags;
#define	RA_VALID	1		/* window fields below are initialized */
	off_t ra_winstart;	/* window start offset */
	size_t ra_winsize;	/* window size */
	off_t ra_next;		/* next offset to read-ahead */
};
68 1.1.2.1 yamt
69 1.1.2.5 yamt #define RA_WINSIZE_INIT MAXPHYS /* initial window size */
70 1.1.2.5 yamt #define RA_WINSIZE_MAX (MAXPHYS * 8) /* max window size */
71 1.1.2.5 yamt #define RA_WINSIZE_SEQENTIAL RA_WINSIZE_MAX /* fixed window size used for
72 1.1.2.5 yamt SEQUENTIAL hint */
73 1.1.2.5 yamt #define RA_MINSIZE (MAXPHYS * 2) /* min size to start i/o */
74 1.1.2.5 yamt #define RA_IOCHUNK MAXPHYS /* read-ahead i/o chunk size */
75 1.1.2.1 yamt
76 1.1.2.1 yamt static off_t ra_startio(struct uvm_object *, off_t, size_t);
77 1.1.2.1 yamt static struct uvm_ractx *ra_allocctx(void);
78 1.1.2.1 yamt static void ra_freectx(struct uvm_ractx *);
79 1.1.2.1 yamt
/* pool from which uvm_ractx structures are allocated and freed. */
POOL_INIT(ractx_pool, sizeof(struct uvm_ractx), 0, 0, 0, "ractx",
    &pool_allocator_nointr);
82 1.1.2.1 yamt
83 1.1.2.1 yamt static struct uvm_ractx *
84 1.1.2.1 yamt ra_allocctx(void)
85 1.1.2.1 yamt {
86 1.1.2.1 yamt
87 1.1.2.1 yamt return pool_get(&ractx_pool, PR_NOWAIT);
88 1.1.2.1 yamt }
89 1.1.2.1 yamt
90 1.1.2.1 yamt static void
91 1.1.2.1 yamt ra_freectx(struct uvm_ractx *ra)
92 1.1.2.1 yamt {
93 1.1.2.1 yamt
94 1.1.2.1 yamt pool_put(&ractx_pool, ra);
95 1.1.2.1 yamt }
96 1.1.2.1 yamt
97 1.1.2.5 yamt /*
98 1.1.2.5 yamt * ra_startio: start i/o for read-ahead.
99 1.1.2.5 yamt *
100 1.1.2.5 yamt * => start i/o for each RA_IOCHUNK sized chunk.
101 1.1.2.5 yamt * => return offset to which we started i/o.
102 1.1.2.5 yamt */
103 1.1.2.5 yamt
/*
 * ra_startio: start i/o for read-ahead.
 *
 * => start i/o for each RA_IOCHUNK sized chunk.
 * => return offset to which we started i/o.
 */

static off_t
ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
{
	const off_t endoff = off + sz;

	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
	    __func__, uobj, off, endoff));
	off = trunc_page(off);
	while (off < endoff) {
		const size_t chunksize = RA_IOCHUNK;
		int error;
		size_t donebytes;
		int npages;
		int orignpages;
		size_t bytelen;

		KASSERT((chunksize & (chunksize - 1)) == 0);
		KASSERT((off & PAGE_MASK) == 0);
		/*
		 * bytelen = distance from off to the next chunksize
		 * boundary; the mask trick relies on chunksize being a
		 * power of two (asserted above).
		 */
		bytelen = ((off + chunksize) & -(off_t)chunksize) - off;
		KASSERT((bytelen & PAGE_MASK) == 0);
		npages = orignpages = bytelen >> PAGE_SHIFT;
		KASSERT(npages != 0);

		/*
		 * use UVM_ADV_RANDOM to avoid recursion.
		 */

		/*
		 * NOTE(review): pgo_get is called with the object locked
		 * and no unlock appears here — presumably the pager
		 * consumes/releases vmobjlock (PGO_LOCKED not set);
		 * confirm against the pager interface contract.
		 */
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, 0);
		DPRINTF(("%s: off=%" PRIu64 ", bytelen=%zu -> %d\n",
		    __func__, off, bytelen, error));
		/*
		 * EBUSY is tolerated: it merely means the pages are
		 * already being handled.  any other error stops the
		 * read-ahead loop.
		 */
		if (error != 0 && error != EBUSY) {
			if (error != EINVAL) { /* maybe past EOF */
				DPRINTF(("%s: error=%d\n", __func__, error));
			}
			break;
		}
		KASSERT(orignpages == npages);
		/* advance by the amount for which i/o was started. */
		donebytes = orignpages << PAGE_SHIFT;
		off += donebytes;
	}

	return off;
}
149 1.1.2.1 yamt
150 1.1.2.1 yamt /* ------------------------------------------------------------ */
151 1.1.2.1 yamt
152 1.1.2.1 yamt struct uvm_ractx *
153 1.1.2.9 yamt uvm_ra_allocctx(void)
154 1.1.2.1 yamt {
155 1.1.2.1 yamt struct uvm_ractx *ra;
156 1.1.2.1 yamt
157 1.1.2.1 yamt ra = ra_allocctx();
158 1.1.2.1 yamt if (ra != NULL) {
159 1.1.2.1 yamt ra->ra_flags = 0;
160 1.1.2.1 yamt }
161 1.1.2.1 yamt
162 1.1.2.1 yamt return ra;
163 1.1.2.1 yamt }
164 1.1.2.1 yamt
165 1.1.2.1 yamt void
166 1.1.2.1 yamt uvm_ra_freectx(struct uvm_ractx *ra)
167 1.1.2.1 yamt {
168 1.1.2.1 yamt
169 1.1.2.1 yamt KASSERT(ra != NULL);
170 1.1.2.1 yamt ra_freectx(ra);
171 1.1.2.1 yamt }
172 1.1.2.1 yamt
173 1.1.2.5 yamt /*
174 1.1.2.5 yamt * uvm_ra_request: start i/o for read-ahead if appropriate.
175 1.1.2.5 yamt *
176 1.1.2.5 yamt * => called by filesystems when [reqoff, reqoff+reqsize) is requested.
177 1.1.2.5 yamt */
178 1.1.2.5 yamt
/*
 * uvm_ra_request: start i/o for read-ahead if appropriate.
 *
 * => called by filesystems when [reqoff, reqoff+reqsize) is requested.
 * => ra may be NULL (no read-ahead context), in which case this is a no-op.
 * => advice is one of the UVM_ADV_* access-pattern hints.
 */

void
uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj,
    off_t reqoff, size_t reqsize)
{

	/* nothing to do without a context, or with an explicitly
	 * random access pattern. */
	if (ra == NULL || advice == UVM_ADV_RANDOM) {
		return;
	}

	/*
	 * XXX needs locking? maybe.
	 * but the worst effect is merely a bad read-ahead.
	 */

	if (advice == UVM_ADV_SEQUENTIAL) {

		/*
		 * always do read-ahead with a large window.
		 */

		if ((ra->ra_flags & RA_VALID) == 0) {
			ra->ra_winstart = ra->ra_next = 0;
			ra->ra_flags |= RA_VALID;
		}
		/* request behind the window: restart read-ahead there. */
		if (reqoff < ra->ra_winstart) {
			ra->ra_next = reqoff;
		}
		/* (sic: "SEQENTIAL" is the macro's spelling.) */
		ra->ra_winsize = RA_WINSIZE_SEQENTIAL;
		goto do_readahead;
	}

	/*
	 * a request with UVM_ADV_NORMAL hint. (ie. no hint)
	 *
	 * we keep a sliding window in order to determine:
	 * - if the previous read-ahead was successful or not.
	 * - how many bytes to read-ahead.
	 */

	/*
	 * if it's the first request for this context,
	 * initialize context and return.
	 */

	if ((ra->ra_flags & RA_VALID) == 0) {
initialize:
		ra->ra_winstart = ra->ra_next = reqoff + reqsize;
		ra->ra_winsize = RA_WINSIZE_INIT;
		ra->ra_flags |= RA_VALID;
		goto done;
	}

	/*
	 * if it isn't in our window,
	 * initialize context and return.
	 * (read-ahead miss)
	 */

	if (reqoff < ra->ra_winstart ||
	    ra->ra_winstart + ra->ra_winsize < reqoff) {

		/*
		 * ... unless we seem to be reading the same chunk repeatedly.
		 * in that case leave the window alone so a repeated read
		 * of one block doesn't destroy the accumulated state.
		 */

		if (reqoff + reqsize == ra->ra_winstart) {
			DPRINTF(("%s: %p: same block: off=%" PRIu64
			    ", size=%zd, winstart=%" PRIu64 "\n",
			    __func__, ra, reqoff, reqsize, ra->ra_winstart));
			goto done;
		}
		goto initialize;
	}

	/*
	 * it's in our window. (read-ahead hit)
	 * - start read-ahead i/o if appropriate.
	 * - advance and enlarge window.
	 */

do_readahead:

	/*
	 * don't bother to read-ahead behind current request.
	 */

	if (reqoff > ra->ra_next) {
		ra->ra_next = reqoff;
	}

	/*
	 * try to make [reqoff, reqoff+ra_winsize) in-core.
	 * note that [reqoff, ra_next) is considered already done.
	 */

	if (reqoff + ra->ra_winsize > ra->ra_next) {
		off_t raoff = MAX(reqoff, ra->ra_next);
		size_t rasize = reqoff + ra->ra_winsize - ra->ra_next;

#if defined(DIAGNOSTIC)
		if (rasize > RA_WINSIZE_MAX) {

			/*
			 * shouldn't happen as far as we're protected by
			 * kernel_lock.
			 */

			printf("%s: corrupted context", __func__);
			rasize = RA_WINSIZE_MAX;
		}
#endif /* defined(DIAGNOSTIC) */

		/*
		 * issue read-ahead only if we can start big enough i/o.
		 * otherwise we end up with a stream of small i/o.
		 */

		if (rasize >= RA_MINSIZE) {
			/* ra_startio returns how far i/o was started. */
			ra->ra_next = ra_startio(uobj, raoff, rasize);
		}
	}

	/*
	 * update window.
	 *
	 * enlarge window by reqsize, so that it grows in a predictable manner
	 * regardless of the size of each read(2).
	 */

	ra->ra_winstart = reqoff + reqsize;
	ra->ra_winsize = MIN(RA_WINSIZE_MAX, ra->ra_winsize + reqsize);

	/* empty statement: a label must precede a statement. */
done:;
}
313