/*	$NetBSD: uvm_bio.c,v 1.12 2001/04/24 04:31:17 thorpej Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
			vm_page_t *, int, int, vm_fault_t, vm_prot_t,
			int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset)	(((((u_long)(uobj)) >> 8) +		\
				  (((u_long)(offset)) >> PAGE_SHIFT)) &	\
				 ubc_object.hashmask)

#define UBC_QUEUE(offset)	(&ubc_object.inactive[((offset) >> ubc_winshift) & \
				 (UBC_NQUEUES - 1)])
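
/*
 * Note (editorial): UBC_HASH mixes the object pointer (shifted to drop
 * allocator-alignment bits) with the page number of the offset, so windows
 * onto the same object at different offsets land in different hash chains.
 * UBC_QUEUE picks one of several inactive queues by window number; with
 * PMAP_PREFER (below) this spreads reusable windows across the pmap's
 * virtual-cache alignment colors.
 */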
71
72 struct ubc_map
73 {
74 struct uvm_object * uobj; /* mapped object */
75 voff_t offset; /* offset into uobj */
76 int refcount; /* refcount on mapping */
77 voff_t writeoff; /* overwrite offset */
78 vsize_t writelen; /* overwrite len */
79
80 LIST_ENTRY(ubc_map) hash; /* hash table */
81 TAILQ_ENTRY(ubc_map) inactive; /* inactive queue */
82 };
83
84 static struct ubc_object
85 {
86 struct uvm_object uobj; /* glue for uvm_map() */
87 char *kva; /* where ubc_object is mapped */
88 struct ubc_map *umap; /* array of ubc_map's */
89
90 LIST_HEAD(, ubc_map) *hash; /* hashtable for cached ubc_map's */
91 u_long hashmask; /* mask for hashtable */
92
93 TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
94 /* inactive queues for ubc_map's */
95
96 } ubc_object;
97
98 struct uvm_pagerops ubc_pager =
99 {
100 NULL, /* init */
101 NULL, /* reference */
102 NULL, /* detach */
103 ubc_fault, /* fault */
104 /* ... rest are NULL */
105 };
106
107 int ubc_nwins = UBC_NWINS;
108 int ubc_winshift = UBC_WINSHIFT;
109 int ubc_winsize;
110 #ifdef PMAP_PREFER
111 int ubc_nqueues;
112 boolean_t ubc_release_unmap = FALSE;
113 #define UBC_NQUEUES ubc_nqueues
114 #define UBC_RELEASE_UNMAP ubc_release_unmap
115 #else
116 #define UBC_NQUEUES 1
117 #define UBC_RELEASE_UNMAP FALSE
118 #endif
119
120 /*
121 * ubc_init
122 *
123 * init pager private data structures.
124 */
125
126 void
127 ubc_init(void)
128 {
129 struct ubc_map *umap;
130 vaddr_t va;
131 int i;
132
133 /*
134 * init ubc_object.
135 * alloc and init ubc_map's.
136 * init inactive queues.
137 * alloc and init hashtable.
138 * map in ubc_object.
139 */
140
141 simple_lock_init(&ubc_object.uobj.vmobjlock);
142 ubc_object.uobj.pgops = &ubc_pager;
143 TAILQ_INIT(&ubc_object.uobj.memq);
144 ubc_object.uobj.uo_npages = 0;
145 ubc_object.uobj.uo_refs = UVM_OBJ_KERN;
146
147 ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
148 M_TEMP, M_NOWAIT);
149 if (ubc_object.umap == NULL)
150 panic("ubc_init: failed to allocate ubc_map");
151 bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));
152
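	/*
	 * Note (editorial): the PMAP_PREFER() call below is a trick to
	 * discover the pmap's preferred virtual-cache alignment.  Rounding
	 * the dummy address 1 up for object offset 0 yields the alignment
	 * size, which determines how many inactive queues (cache colors)
	 * to use and also serves as the alignment hint for uvm_map() below.
	 */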
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
				     sizeof(struct ubc_inactive_head),
				     M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}


/*
 * ubc_fault: fault routine for ubc mapping
 */
int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	vm_page_t *ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, rv, npages;
	struct vm_page *pgs[(1 << ubc_winshift) >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */
	if (flags & PGO_LOCKED) {
#if 0
		return EBUSY;
#else
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
#endif
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
		    va, ubc_offset, access_type,0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(uobj != NULL);

	npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;

	/*
	 * XXXUBC
	 * if npages is more than 1 we have to be sure that
	 * we set PGO_OVERWRITE correctly.
	 */
	if (access_type == VM_PROT_WRITE) {
		npages = 1;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
		    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
		    vp->v_uvm.u_size);
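
	/*
	 * Note (editorial): the test below sets PGO_OVERWRITE when the
	 * faulting page lies entirely within the range the caller declared
	 * it will write (so its old contents need not be read from disk),
	 * or when the page extends past end-of-file (the tail is not backed
	 * by on-disk data anyway).
	 */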
	if (access_type & VM_PROT_WRITE &&
	    slot_offset >= umap->writeoff &&
	    (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
	     slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
		UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
		flags |= PGO_OVERWRITE;
	} else {
		UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
	}
	/* XXX be sure to zero any part of the page past EOF */

	/*
	 * XXX
	 * ideally we'd like to pre-fault all of the pages we're overwriting.
	 * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
	 * pages in [writeoff, writeoff+writelen] instead of just the one.
	 */

	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
		    uobj, umap->offset + slot_offset, npages, 0);

	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
			     access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages,0,0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}
	if (npages == 0) {
		return 0;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0,0);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			rv = uobj->pgops->pgo_releasepg(pg, NULL);
			KASSERT(rv);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
			(pg->flags & PG_RDONLY) == 0);

		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();

		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
			   VM_PROT_READ | VM_PROT_WRITE, access_type);

		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	simple_unlock(&uobj->vmobjlock);
	pmap_update();
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a buffer mapping
 */
void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	int s;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
		    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = min(*lenp, ubc_winsize - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

	s = splbio();

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any),
		 * add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}

		umap->uobj = uobj;
		umap->offset = umap_offset;

		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
				 umap, hash);

		va = (vaddr_t)(ubc_object.kva +
			       ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		pmap_update();
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) &&
	    (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
		    umap, umap->refcount,
		    ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift),
		    0);

	return ubc_object.kva +
		((umap - ubc_object.umap) << ubc_winshift) + slot_offset;
}
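
/*
 * Example usage (editorial sketch, not part of the original file): a
 * filesystem read path maps a window over the vnode, copies through it
 * with uiomove(), and releases it.  This assumes the UBC_READ flag and
 * a caller shaped like the ufs_readwrite.c loop of this era.
 *
 *	while (uio->uio_resid > 0 && uio->uio_offset < filesize) {
 *		bytelen = MIN(filesize - uio->uio_offset, uio->uio_resid);
 *		win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset,
 *		    &bytelen, UBC_READ);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error) {
 *			break;
 *		}
 *	}
 */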

void
ubc_release(va, wlen)
	void *va;
	vsize_t wlen;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	int s;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va,0,0,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {
			vaddr_t va;

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			va = (vaddr_t)(ubc_object.kva +
			    ((umap - ubc_object.umap) << ubc_winshift));
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update();
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
					  inactive);
		} else {
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
					  inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount,0,0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}
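
/*
 * Example usage (editorial sketch): the write side brackets the copy
 * with ubc_alloc(..., UBC_WRITE) and ubc_release(); the writeoff and
 * writelen recorded by ubc_alloc() are what let ubc_fault() above
 * decide when a page may be overwritten without first being read in.
 *
 *	win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset, &bytelen,
 *	    UBC_WRITE);
 *	error = uiomove(win, bytelen, uio);
 *	ubc_release(win, 0);
 */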

/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	int s;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj ||
		    umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		pmap_update();

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}