/*	$NetBSD: uvm_bio.c,v 1.17 2001/09/10 21:19:43 chris Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */
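
/*
 * overview (a summary of the code below): the kernel virtual space
 * reserved by ubc_init() is divided into ubc_nwins windows, each
 * 1 << ubc_winshift bytes.  ubc_alloc() maps a window over a
 * (uvm_object, offset) pair and returns a kernel address inside it;
 * the backing vnode pages are faulted in lazily by ubc_fault().
 * windows with no references sit on inactive queues, hashed by object
 * and offset so they can be reused without remapping.
 */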


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
		    struct vm_page **, int, int, vm_fault_t, vm_prot_t, int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) (((((u_long)(uobj)) >> 8) + \
				 (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) >> ubc_winshift) & \
					       (UBC_NQUEUES - 1)])
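
/*
 * each window's inactive queue is chosen by its window-aligned offset.
 * the idea, when PMAP_PREFER is defined and UBC_NQUEUES > 1, is that a
 * window recycled from the queue for a given offset already sits at a
 * kernel virtual address whose alignment modulo the PMAP_PREFER()
 * stride matches that offset, keeping the mapping cache-friendly.
 */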

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	int			refcount;	/* refcount on mapping */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES	ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES	1
#define UBC_RELEASE_UNMAP FALSE
#endif
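
/*
 * sizing notes: ubc_init() below maps ubc_nwins windows of ubc_winsize
 * (1 << ubc_winshift) bytes each, so the mapping cache consumes
 * ubc_nwins << ubc_winshift bytes of kernel virtual address space in
 * total.  UBC_NWINS and UBC_WINSHIFT provide the compiled-in defaults;
 * since ubc_nwins and ubc_winshift are plain ints, they can be patched
 * to tune the cache, provided this happens before ubc_init() runs.
 */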

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
				     sizeof(struct ubc_inactive_head),
				     M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
194 panic("ubc_init: failed to map ubc_object\n");
	}
	UVMHIST_INIT(ubchist, 300);
}


/*
 * ubc_fault: fault routine for ubc mapping.  find the ubc_map window
 * being faulted, fault in its vnode's pages with VOP_GETPAGES() and
 * enter them into the kernel pmap.
 */
int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, rv, npages;
	struct vm_page *pgs[(1 << ubc_winshift) >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */
	if (flags & PGO_LOCKED) {
#if 0
		return EBUSY;
#else
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
#endif
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
		    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(uobj != NULL);

	npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;

	/*
	 * XXXUBC
	 * if npages is more than 1 we have to be sure that
	 * we set PGO_OVERWRITE correctly.
	 */
	if (access_type == VM_PROT_WRITE) {
		npages = 1;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
		    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
		    vp->v_uvm.u_size);

	if (access_type & VM_PROT_WRITE &&
	    slot_offset >= umap->writeoff &&
	    (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
	     slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
		UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
		flags |= PGO_OVERWRITE;
	} else {
		UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
	}

	/* XXX be sure to zero any part of the page past EOF */

	/*
	 * XXX
	 * ideally we'd like to pre-fault all of the pages we're overwriting.
	 * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
	 * pages in [writeoff, writeoff + writelen] instead of just the one.
	 */

	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
		    uobj, umap->offset + slot_offset, npages, 0);

	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
			     access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages,0,0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}
	if (npages == 0) {
		return 0;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0,0);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			rv = uobj->pgops->pgo_releasepg(pg, NULL);
			KASSERT(rv);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
			(pg->flags & PG_RDONLY) == 0);

		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();

		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
			   VM_PROT_READ | VM_PROT_WRITE, access_type);

		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a buffer mapping
 */
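
/*
 * a typical caller loops over a transfer roughly like this (a sketch
 * only: "uio", "todo" and "writing" are illustrative names, and the
 * real read/write loops live in the vnode code, not here):
 *
 *	while (todo > 0) {
 *		vsize_t bytelen = todo;
 *		void *win;
 *
 *		win = ubc_alloc(uobj, uio->uio_offset, &bytelen,
 *		    writing ? UBC_WRITE : 0);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *		todo -= bytelen;
 *	}
 *
 * note that ubc_alloc() truncates *lenp to what fits in one window,
 * which is why the caller must loop.
 */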
void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	int s;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
		    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = min(*lenp, ubc_winsize - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

	s = splbio();

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any),
		 * add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}

		umap->uobj = uobj;
		umap->offset = umap_offset;

		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
				 umap, hash);

		va = (vaddr_t)(ubc_object.kva +
			       ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		pmap_update(pmap_kernel());
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
		    umap, umap->refcount,
		    ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift),
		    0);

	return ubc_object.kva +
	    ((umap - ubc_object.umap) << ubc_winshift) + slot_offset;
}


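/*
 * ubc_release: release a mapping returned by ubc_alloc().  drops one
 * reference; when the last reference goes away, the window is put back
 * on an inactive queue for reuse (and, for VTEXT vnodes when
 * UBC_RELEASE_UNMAP is in effect, is unmapped immediately; see the
 * comment in the code below).
 */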
void
ubc_release(va, wlen)
	void *va;
	vsize_t wlen;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	int s;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va,0,0,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {
			vaddr_t va;

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			va = (vaddr_t)(ubc_object.kva +
			    ((umap - ubc_object.umap) << ubc_winshift));
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
					  inactive);
		} else {
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
					  inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount,0,0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}

/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	int s;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj ||
		    umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		pmap_update(pmap_kernel());

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}