/*	$NetBSD: uvm_bio.c,v 1.18 2001/09/15 20:36:45 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t, struct vm_page **,
	    int, int, vm_fault_t, vm_prot_t, int));
struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
	    ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
	    (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
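
/*
 * The window KVA is one contiguous range: window i of the
 * ubc_object.umap[] array is mapped at
 * ubc_object.kva + (i << ubc_winshift) and covers ubc_winsize bytes.
 * UBC_UMAP_ADDR() inverts a umap pointer back into that address, and
 * UBC_QUEUE() spreads windows across the inactive queues by their
 * window-aligned file offset.
 */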

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
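
/*
 * A ubc_map with refcount zero sits on one of the inactive queues and
 * is fair game for recycling by ubc_alloc().  It stays in the hash
 * (uobj != NULL) so a later ubc_alloc() of the same window can reuse
 * it; UMAP_MAPPING_CACHED records whether the kernel pmap still holds
 * its translations.
 */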

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP FALSE
#endif
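
/*
 * With PMAP_PREFER, multiple inactive queues keep recycled windows
 * sorted by virtual cache color, and UBC_RELEASE_UNMAP makes
 * ubc_release() tear down mappings of VTEXT (executable) vnodes
 * eagerly; see the comment in ubc_release() below.
 */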

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */

	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	/*
	 * If the pmap prefers certain virtual alignments (i.e. it has
	 * a virtually indexed cache), use one inactive queue per
	 * window-sized slice of the preferred alignment so that
	 * recycled windows stay spread across cache colors.
	 */
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	if (ubc_object.hash == NULL)
		panic("ubc_init: failed to allocate hash table");
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */
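
/*
 * A fault on a window happens when a copyin()/copyout() through the
 * window touches a page that the kernel pmap does not currently map.
 * We recover the owning ubc_map from the faulting address, fetch the
 * vnode's pages with VOP_GETPAGES(), and enter them into the faulting
 * pmap by hand so the interrupted copy can continue.
 */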

int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(vp != NULL);

	npages = MIN(ubc_winsize - slot_offset,
	    (int)(round_page(MAX(vp->v_size, umap->offset +
	    umap->writeoff + umap->writelen)) -
	    umap->offset)) >> PAGE_SHIFT;

again:
	memset(pgs, 0, sizeof(pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
	    "v_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
	    vp->v_size);
	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	flags |= PGO_PASTEOF;
	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
	    access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages,
	    0, 0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_pagefree(pg);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
		    (pg->flags & PG_RDONLY) == 0);
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE, access_type);
		uvm_pageactivate(pg);
		pg->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(pg, NULL);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}

/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */
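
/*
 * A sketch of typical use (not code from this file): a vnode
 * read/write path maps a window over the transfer range, copies with
 * uiomove(), and releases the window.  The uvm_object is assumed to
 * be the vnode's embedded object, vp->v_uvm.u_obj in this era of the
 * tree:
 *
 *	while (uio->uio_resid > 0 && error == 0) {
 *		bytes = MIN(ubc_winsize, uio->uio_resid);
 *		win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset,
 *		    &bytes, UBC_WRITE);
 *		error = uiomove(win, bytes, uio);
 *		ubc_release(win, 0);
 *	}
 *
 * ubc_alloc() may shorten *lenp to what fits in one window, so the
 * caller must loop until the uio is drained.
 */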

void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	struct vnode *vp = (struct vnode *)uobj;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
	    uobj, offset, *lenp, vp->v_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}
418
419 umap->refcount++;
420 simple_unlock(&ubc_object.uobj.vmobjlock);
421 UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
422 umap, umap->refcount, va, flags);
423
424 if (flags & UBC_FAULTBUSY) {
425 int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
426 struct vm_page *pgs[npages];
427 int gpflags = PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF;
428 int i;
429
430 if (umap->flags & UMAP_MAPPING_CACHED) {
431 umap->flags &= ~UMAP_MAPPING_CACHED;
432 pmap_remove(pmap_kernel(), va, va + ubc_winsize);
433 }
434 simple_lock(&uobj->vmobjlock);
435 error = VOP_GETPAGES(vp, trunc_page(offset), pgs, &npages, 0,
436 VM_PROT_READ|VM_PROT_WRITE, 0, gpflags);
437 UVMHIST_LOG(ubchist, "faultbusy getpages %d", error,0,0,0);
438 if (error) {
439 goto out;
440 }
441 for (i = 0; i < npages; i++) {
442 pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
443 VM_PAGE_TO_PHYS(pgs[i]),
444 VM_PROT_READ | VM_PROT_WRITE);
445 }
446 pmap_update(pmap_kernel());
447 umap->flags |= UMAP_PAGES_LOCKED;
448 }
449
450 out:
451 return (void *)(va + slot_offset);
452 }

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(va, flags)
	void *va;
	int flags;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}

/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 * (end == 0 means "no upper bound".)
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = UBC_UMAP_ADDR(umap);
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}