/*	$NetBSD: uvm_readahead.c,v 1.10 2018/05/19 15:18:02 jdolecek Exp $	*/

/*-
 * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * uvm_object read-ahead
 *
 * TODO:
 *	- tune.
 *	- handle multiple streams.
 *	- find a better way to deal with PGO_LOCKED pager requests.
 *	  (currently just ignored)
 *	- consider the amount of memory in the system.
 *	- consider the speed of the underlying device.
 *	- consider filesystem block size / block layout.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.10 2018/05/19 15:18:02 jdolecek Exp $");

#include <sys/param.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(READAHEAD_DEBUG)
#define	DPRINTF(a)	printf a
#else /* defined(READAHEAD_DEBUG) */
#define	DPRINTF(a)	/* nothing */
#endif /* defined(READAHEAD_DEBUG) */

/*
 * uvm_ractx: read-ahead context.
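 *
 * The window [ra_winstart, ra_winstart + ra_winsize) is the range we
 * expect a sequential stream to read next; ra_next records how far
 * read-ahead i/o has already been issued.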
 */

struct uvm_ractx {
	int ra_flags;
#define	RA_VALID	1
	off_t ra_winstart;	/* window start offset */
	size_t ra_winsize;	/* window size */
	off_t ra_next;		/* next offset to read-ahead */
};

#if defined(sun2) || defined(sun3)
/* XXX: on sun2 and sun3 MAXPHYS is 0xe000 */
#undef MAXPHYS
#define	MAXPHYS		0x8000	/* XXX */
#endif

#define	RA_WINSIZE_INIT		MAXPHYS		/* initial window size */
#define	RA_WINSIZE_MAX		(MAXPHYS * 16)	/* max window size */
#define	RA_WINSIZE_SEQUENTIAL	RA_WINSIZE_MAX	/* fixed window size used for
						   SEQUENTIAL hint */
#define	RA_MINSIZE		(MAXPHYS * 2)	/* min size to start i/o */
#define	RA_IOCHUNK		MAXPHYS		/* read-ahead i/o chunk size */

static off_t ra_startio(struct uvm_object *, off_t, size_t);
static struct uvm_ractx *ra_allocctx(void);
static void ra_freectx(struct uvm_ractx *);

static struct pool_cache ractx_cache;

/*
 * uvm_ra_init: initialize readahead module.
 */

void
uvm_ra_init(void)
{

	pool_cache_bootstrap(&ractx_cache, sizeof(struct uvm_ractx), 0, 0, 0,
	    "ractx", NULL, IPL_NONE, NULL, NULL, NULL);
}

static struct uvm_ractx *
ra_allocctx(void)
{

	return pool_cache_get(&ractx_cache, PR_NOWAIT);
}

static void
ra_freectx(struct uvm_ractx *ra)
{

	pool_cache_put(&ractx_cache, ra);
}

/*
 * ra_startio: start i/o for read-ahead.
 *
 * => start i/o for each RA_IOCHUNK sized chunk.
 * => return offset to which we started i/o.
 */

static off_t
ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
{
	const off_t endoff = off + sz;

	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
	    __func__, uobj, off, endoff));

	/*
	 * Don't issue read-ahead if the last page of the range is
	 * already cached.  Since the access is sequential, the
	 * intermediate pages would have similar LRU stats and are
	 * hence likely to still be cached as well.  This speeds up
	 * cached I/O, since it avoids the lookups and temporary
	 * allocations done by a full pgo_get.
	 */
	mutex_enter(uobj->vmobjlock);
	struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1));
	mutex_exit(uobj->vmobjlock);
	if (pg != NULL) {
		DPRINTF(("%s: off=%" PRIu64 ", sz=%zu already cached\n",
		    __func__, off, sz));
		return endoff;
	}

	off = trunc_page(off);
	while (off < endoff) {
		const size_t chunksize = RA_IOCHUNK;
		int error;
		size_t donebytes;
		int npages;
		int orignpages;
		size_t bytelen;

		KASSERT((chunksize & (chunksize - 1)) == 0);
		KASSERT((off & PAGE_MASK) == 0);
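		/* read up to the next chunksize boundary (a power of 2) */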
		bytelen = ((off + chunksize) & -(off_t)chunksize) - off;
		KASSERT((bytelen & PAGE_MASK) == 0);
		npages = orignpages = bytelen >> PAGE_SHIFT;
		KASSERT(npages != 0);

		/*
		 * use UVM_ADV_RANDOM to avoid recursion.
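		 * PGO_NOTIMESTAMP keeps this speculative i/o from
		 * marking the object accessed.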
		 */

		mutex_enter(uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, PGO_NOTIMESTAMP);
		DPRINTF(("%s: off=%" PRIu64 ", bytelen=%zu -> %d\n",
		    __func__, off, bytelen, error));
		if (error != 0 && error != EBUSY) {
			if (error != EINVAL) { /* maybe past EOF */
				DPRINTF(("%s: error=%d\n", __func__, error));
			}
			break;
		}
		KASSERT(orignpages == npages);
		donebytes = orignpages << PAGE_SHIFT;
		off += donebytes;
	}

	return off;
}

/* ------------------------------------------------------------ */

/*
 * uvm_ra_allocctx: allocate a context.
 */

struct uvm_ractx *
uvm_ra_allocctx(void)
{
	struct uvm_ractx *ra;

	ra = ra_allocctx();
	if (ra != NULL) {
		ra->ra_flags = 0;
	}

	return ra;
}

/*
 * uvm_ra_freectx: free a context.
 */

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	KASSERT(ra != NULL);
	ra_freectx(ra);
}

/*
 * uvm_ra_request: update a read-ahead context and start i/o if appropriate.
 *
 * => called when [reqoff, reqoff+reqsize) is requested.
 * => object must be locked by caller, will return locked.
 */

void
uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj,
    off_t reqoff, size_t reqsize)
{

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (ra == NULL || advice == UVM_ADV_RANDOM) {
		return;
	}

	if (advice == UVM_ADV_SEQUENTIAL) {

		/*
		 * always do read-ahead with a large window.
		 */

		if ((ra->ra_flags & RA_VALID) == 0) {
			ra->ra_winstart = ra->ra_next = 0;
			ra->ra_flags |= RA_VALID;
		}
		if (reqoff < ra->ra_winstart) {
			ra->ra_next = reqoff;
		}
		ra->ra_winsize = RA_WINSIZE_SEQUENTIAL;
		goto do_readahead;
	}

	/*
	 * a request with UVM_ADV_NORMAL hint. (i.e. no hint)
	 *
	 * we keep a sliding window in order to determine:
	 *	- if the previous read-ahead was successful or not.
	 *	- how many bytes to read-ahead.
	 */

	/*
	 * if it's the first request for this context,
	 * initialize context and return.
	 */

	if ((ra->ra_flags & RA_VALID) == 0) {
initialize:
		ra->ra_winstart = ra->ra_next = reqoff + reqsize;
		ra->ra_winsize = RA_WINSIZE_INIT;
		ra->ra_flags |= RA_VALID;
		goto done;
	}

	/*
	 * if it isn't in our window,
	 * initialize context and return.
	 * (read-ahead miss)
	 */

	if (reqoff < ra->ra_winstart ||
	    ra->ra_winstart + ra->ra_winsize < reqoff) {

		/*
		 * ... unless we seem to be reading the same chunk repeatedly.
		 *
		 * XXX should have some margin?
		 */

		if (reqoff + reqsize == ra->ra_winstart) {
			DPRINTF(("%s: %p: same block: off=%" PRIu64
			    ", size=%zd, winstart=%" PRIu64 "\n",
			    __func__, ra, reqoff, reqsize, ra->ra_winstart));
			goto done;
		}
		goto initialize;
	}

	/*
	 * it's in our window. (read-ahead hit)
	 *	- start read-ahead i/o if appropriate.
	 *	- advance and enlarge window.
	 */

do_readahead:

	/*
	 * don't bother to read-ahead behind current request.
	 */

	if (reqoff > ra->ra_next) {
		ra->ra_next = reqoff;
	}

	/*
	 * try to make [reqoff, reqoff+ra_winsize) in-core.
	 * note that [reqoff, ra_next) is considered already done.
	 */

	if (reqoff + ra->ra_winsize > ra->ra_next) {
		off_t raoff = MAX(reqoff, ra->ra_next);
		size_t rasize = reqoff + ra->ra_winsize - ra->ra_next;
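		/*
		 * note: ra_next >= reqoff at this point, so raoff ==
		 * ra_next and, unless the context is corrupted,
		 * rasize <= ra_winsize <= RA_WINSIZE_MAX.
		 */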

#if defined(DIAGNOSTIC)
		if (rasize > RA_WINSIZE_MAX) {
			printf("%s: corrupted context\n", __func__);
			rasize = RA_WINSIZE_MAX;
		}
#endif /* defined(DIAGNOSTIC) */

		/*
		 * issue read-ahead only if we can start big enough i/o.
		 * otherwise we end up with a stream of small i/o.
		 */

		if (rasize >= RA_MINSIZE) {
			off_t next;

			mutex_exit(uobj->vmobjlock);
			next = ra_startio(uobj, raoff, rasize);
			mutex_enter(uobj->vmobjlock);
			ra->ra_next = next;
		}
	}

	/*
	 * update window.
	 *
	 * enlarge window by reqsize, so that it grows in a predictable manner
	 * regardless of the size of each read(2).
	 */

	ra->ra_winstart = reqoff + reqsize;
	ra->ra_winsize = MIN(RA_WINSIZE_MAX, ra->ra_winsize + reqsize);

done:;
}

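/*
 * uvm_readahead: issue read-ahead for the given range,
 * bypassing the read-ahead context machinery.
 */
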
int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
{

	/*
	 * don't allow too much read-ahead.
	 */
	if (size > RA_WINSIZE_MAX) {
		size = RA_WINSIZE_MAX;
	}
	ra_startio(uobj, off, size);
	return 0;
}