/*	$NetBSD: uvm_bio.c,v 1.14 2001/05/26 21:27:20 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
			       struct vm_page **, int, int, vm_fault_t,
			       vm_prot_t, int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) (((((u_long)(uobj)) >> 8) +		\
				 (((u_long)(offset)) >> PAGE_SHIFT)) &	\
				ubc_object.hashmask)

#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) >> ubc_winshift) & \
						(UBC_NQUEUES - 1)])
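
/*
 * UBC_HASH hashes the pair (uobj, offset) down to a chain in
 * ubc_object.hash.  Shifting the object pointer right by 8 discards
 * its low bits, which carry little entropy since objects sit at
 * aligned addresses, and the page number of the offset is added in so
 * that different windows of the same object hash to different chains.
 *
 * UBC_QUEUE picks the inactive queue for a given file offset.  For
 * illustration (the real values are machine-dependent): with 8KB
 * windows (ubc_winshift == 13) and UBC_NQUEUES == 4, offsets 0x0000,
 * 0x2000, 0x4000 and 0x6000 select queues 0, 1, 2 and 3, and offset
 * 0x8000 wraps back to queue 0.
 */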

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	int			refcount;	/* refcount on mapping */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
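
/*
 * A ubc_map describes one mapping window.  While refcount is nonzero
 * the window is in use by some ubc_alloc()/ubc_release() pair; when it
 * drops to zero the window goes onto an inactive queue, but it
 * normally stays in the hash table so that a later ubc_alloc() of the
 * same (uobj, offset) pair can revive it without rebuilding the
 * mapping.
 */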

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;
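
/*
 * There is one ubc_object for the whole system.  Its kernel virtual
 * space is carved into ubc_nwins windows of ubc_winsize bytes, and
 * window i is described by ubc_object.umap[i], so the two are
 * interconvertible:
 *
 *	va   = ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift)
 *	umap = &ubc_object.umap[(va - ubc_object.kva) >> ubc_winshift]
 */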

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};
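
/*
 * ubc_pager implements only the fault operation, so any access to an
 * address in a window that is not currently entered in the kernel
 * pmap is resolved by ubc_fault(), which fetches the backing pages
 * from the underlying vnode.
 */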

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP FALSE
#endif
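
/*
 * On machines with virtually-indexed caches, PMAP_PREFER() reports the
 * alignment that mappings of a given object offset should have to
 * avoid cache aliasing.  We keep one inactive queue per window-sized
 * slice of that alignment so that ubc_alloc() can pick a window whose
 * kernel virtual address is suitably aligned for the file offset being
 * mapped.  Without PMAP_PREFER a single queue suffices.
 */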

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
				     sizeof(struct ubc_inactive_head),
				     M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}
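
/*
 * All of the allocations above happen once at boot.  After ubc_init()
 * returns, the set of windows is fixed; windows are only ever recycled
 * among objects, never allocated or freed.
 */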

/*
 * ubc_fault: fault routine for ubc mapping
 */
int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, rv, npages;
	struct vm_page *pgs[(1 << ubc_winshift) >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */
	if (flags & PGO_LOCKED) {
#if 0
		return EBUSY;
#else
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
#endif
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
		    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(uobj != NULL);

	npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;

	/*
	 * XXXUBC
	 * if npages is more than 1 we have to be sure that
	 * we set PGO_OVERWRITE correctly.
	 */
	if (access_type == VM_PROT_WRITE) {
		npages = 1;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
		    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
		    vp->v_uvm.u_size);

	/*
	 * set PGO_OVERWRITE (ie. the existing contents of the page may be
	 * discarded unread) only for write faults on pages that begin at or
	 * after the start of the pending write and either end within it or
	 * extend past the end of the file.
	 */
	if (access_type & VM_PROT_WRITE &&
	    slot_offset >= umap->writeoff &&
	    (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
	     slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
		UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
		flags |= PGO_OVERWRITE;
	} else {
		UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
	}
	/* XXX be sure to zero any part of the page past EOF */

	/*
	 * XXX
	 * ideally we'd like to pre-fault all of the pages we're overwriting.
	 * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
	 * pages in [writeoff, writeoff + writelen] instead of just the one.
	 */

	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
		    uobj, umap->offset + slot_offset, npages, 0);

	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
			     access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages,0,0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}
	if (npages == 0) {
		return 0;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0,0);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			rv = uobj->pgops->pgo_releasepg(pg, NULL);
			KASSERT(rv);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
			(pg->flags & PG_RDONLY) == 0);

		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();

		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
			   VM_PROT_READ | VM_PROT_WRITE, access_type);

		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	simple_unlock(&uobj->vmobjlock);
	pmap_update();
	return 0;
}
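
/*
 * Note that the loop above enters mappings with both read and write
 * permission regardless of access_type (the KASSERT guarantees a
 * PG_RDONLY page is never mapped writable); access_type is still
 * passed to pmap_enter() so the pmap can account for the access that
 * actually triggered the fault.
 */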

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
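
/*
 * The expected calling pattern is the one the filesystem read/write
 * paths use: map a window over the file, copy through it, release it.
 * A sketch (illustrative only; "vp" and "uio" and their locking are
 * assumed to be set up by the caller):
 *
 *	vsize_t bytelen;
 *	void *win;
 *	int error;
 *
 *	bytelen = uio->uio_resid;
 *	win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset, &bytelen,
 *	    UBC_WRITE);
 *	error = uiomove(win, bytelen, uio);
 *	ubc_release(win, 0);
 *
 * ubc_alloc() trims bytelen to what fits in one window, so callers
 * loop until the whole transfer is done.
 */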

/*
 * ubc_alloc: allocate a buffer mapping
 */
void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	int s;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
		    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = min(*lenp, ubc_winsize - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

	s = splbio();

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any),
		 * add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}

		umap->uobj = uobj;
		umap->offset = umap_offset;

		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
				 umap, hash);

		va = (vaddr_t)(ubc_object.kva +
			       ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		pmap_update();
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) &&
	    (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
		    umap, umap->refcount,
		    ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift),
		    0);

	return ubc_object.kva +
	       ((umap - ubc_object.umap) << ubc_winshift) + slot_offset;
}
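
/*
 * The recycling policy in ubc_alloc() above: a mapping not found in
 * the hash is built by taking the window at the head of the
 * appropriate inactive queue, i.e. the least recently released window
 * with a suitably aligned kernel va.  If every window is busy (the
 * queue is empty), we sleep and retry rather than fail.
 */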


void
ubc_release(va, wlen)
	void *va;
	vsize_t wlen;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	int s;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va,0,0,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {
			vaddr_t va;

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			va = (vaddr_t)(ubc_object.kva +
				       ((umap - ubc_object.umap) << ubc_winshift));
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update();
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
					  inactive);
		} else {
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
					  inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount,0,0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}
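
/*
 * The two insertion points above implement the reuse policy: a VTEXT
 * window has already had its mapping torn down, so it goes to the head
 * of the queue to be recycled first, while a window whose mapping is
 * intact goes to the tail, staying findable in the hash (and cheap to
 * revive) for as long as possible.
 */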


/*
 * ubc_flush: remove any mappings for the given range of the object
 * from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	int s;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj ||
		    umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
			       ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		pmap_update();

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}