/*	$NetBSD: uvm_bio.c,v 1.26 2002/09/27 15:38:08 provos Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */
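
/*
 * Rough picture of the design (descriptive only; the details live in the
 * code below): the cache is a pool of fixed-size windows (struct ubc_map)
 * in kernel virtual address space, all backed by the fake uvm_object
 * "ubc_object".  ubc_alloc() hands out a window covering an
 * ubc_winsize-aligned chunk of a vnode; the vnode's pages are pulled in
 * lazily by ubc_fault() via VOP_GETPAGES() when the window is first
 * touched.  Windows are looked up by (uobj, offset) in a small hash table
 * and recycled in LRU order from the inactive queues when no cached
 * mapping exists.
 */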

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.26 2002/09/27 15:38:08 provos Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t, struct vm_page **, int,
	    int, vm_fault_t, vm_prot_t, int));
struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
	    ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
	    (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
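
/*
 * UBC_HASH() hashes an (object, offset) pair to a hash chain, UBC_QUEUE()
 * picks the inactive queue for a given file offset (one queue per window
 * "color" when PMAP_PREFER is available), and UBC_UMAP_ADDR() recovers the
 * kernel virtual address of a ubc_map's window from its index in the umap
 * array.  As a worked example (arithmetic only, assuming a window shift of
 * 13, i.e. 8kB windows): file offset 0x5400 lives in the window starting
 * at file offset 0x4000, 0x1400 bytes into that window.
 */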


#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES	ubc_nqueues
#define UBC_RELEASE_UNMAP	ubc_release_unmap
#else
#define UBC_NQUEUES	1
#define UBC_RELEASE_UNMAP	FALSE
#endif
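
/*
 * With PMAP_PREFER, the inactive queues are split so that each queue holds
 * windows of one virtual-cache alignment; a window recycled from
 * UBC_QUEUE(offset) then tends to have an alignment the pmap likes for
 * that offset.  Executable images are the exception (see UBC_RELEASE_UNMAP
 * in ubc_release()): their kernel mappings are torn down eagerly so that
 * process text can keep using the cache.
 */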

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
	    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(vp != NULL);

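	/*
	 * The number of pages to request: the rest of the window past
	 * slot_offset, clipped to the object's size (extended to cover any
	 * pending overwrite region) so we don't ask for pages past the end
	 * of the file.
	 */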
	npages = MIN(ubc_winsize - slot_offset,
	    (round_page(MAX(vp->v_size, umap->offset +
	    umap->writeoff + umap->writelen)) - umap->offset)) >> PAGE_SHIFT;

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
	    "v_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
	    vp->v_size);
	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	flags |= PGO_PASTEOF;
	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
	    access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	/*
	 * for virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we don't absolutely need them,
	 * since the "compatible alias" trick doesn't work on such caches.
	 * otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_pagefree(pg);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
		    (pg->flags & PG_RDONLY) == 0);
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    (pg->flags & PG_RDONLY) ? prot & ~VM_PROT_WRITE : prot,
		    access_type);
		uvm_pageactivate(pg);
		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
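
/*
 * A typical write path drives these interfaces roughly as follows (a
 * sketch, not taken from this file; "uio", "vp" and the error handling are
 * assumed to come from the caller, and &vp->v_uobj is assumed to be the
 * vnode's uvm_object):
 *
 *	while (uio->uio_resid > 0 && error == 0) {
 *		vsize_t bytelen = uio->uio_resid;
 *		void *win;
 *
 *		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
 *		    UBC_WRITE);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *	}
 *
 * ubc_alloc() trims "bytelen" to what fits in one window, so the loop
 * simply comes around again for the rest of the request.
 */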

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	struct vnode *vp = (struct vnode *)uobj;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
	    uobj, offset, *lenp, vp->v_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

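	/*
	 * UBC_FAULTBUSY callers intend to overwrite the whole range, so
	 * fault the pages in now (PGO_OVERWRITE avoids reading their old
	 * contents), wire them into the window with pmap_kenter_pa() and
	 * leave them busy until ubc_release() cleans up.
	 */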
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags = PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF;
		int i;

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = VOP_GETPAGES(vp, trunc_page(offset), pgs, &npages, 0,
		    VM_PROT_READ|VM_PROT_WRITE, 0, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(va, flags)
	void *va;
	int flags;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

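	/*
	 * Undo a UBC_FAULTBUSY allocation: zero the tail of the last page
	 * beyond the write, mark the pages dirty and active, tear down the
	 * wired kernel mappings and unbusy the pages.
	 */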
	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

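	/*
	 * Walk every window and toss the ones that map part of
	 * [start, end) of this object and are not currently in use.
	 * An "end" of 0 means no upper bound.
	 */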
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
