/*	$NetBSD: uvm_bio.c,v 1.9 2001/03/15 06:10:56 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int      ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
                               vm_page_t *, int, int, vm_fault_t, vm_prot_t,
                               int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) (((((u_long)(uobj)) >> 8) +              \
                                 (((u_long)(offset)) >> PAGE_SHIFT)) &  \
                                ubc_object.hashmask)

#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) / ubc_winsize) & \
                                                (UBC_NQUEUES - 1)])
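
/*
 * UBC_HASH buckets a (uobj, offset) pair by mixing the object pointer
 * (shifted to drop always-zero low bits) with the page number of the
 * offset.  UBC_QUEUE picks the inactive queue for a file offset from
 * the window index modulo UBC_NQUEUES.  for example, assuming
 * ubc_winsize is 8192 and UBC_NQUEUES is 4, offset 0x5000 lies in
 * window 0x5000 / 0x2000 = 2 and maps to inactive queue 2 & 3 = 2.
 */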

struct ubc_map
{
        struct uvm_object *     uobj;           /* mapped object */
        voff_t                  offset;         /* offset into uobj */
        int                     refcount;       /* refcount on mapping */
        voff_t                  writeoff;       /* overwrite offset */
        vsize_t                 writelen;       /* overwrite len */

        LIST_ENTRY(ubc_map)     hash;           /* hash table */
        TAILQ_ENTRY(ubc_map)    inactive;       /* inactive queue */
};

static struct ubc_object
{
        struct uvm_object uobj;         /* glue for uvm_map() */
        char *kva;                      /* where ubc_object is mapped */
        struct ubc_map *umap;           /* array of ubc_map's */

        LIST_HEAD(, ubc_map) *hash;     /* hashtable for cached ubc_map's */
        u_long hashmask;                /* mask for hashtable */

        TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
                                        /* inactive queues for ubc_map's */

} ubc_object;
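
/*
 * the kva range is ubc_nwins windows of ubc_winsize bytes each:
 * window i is the ubc_winsize bytes starting at
 * ubc_object.kva + i * ubc_winsize and is described by
 * ubc_object.umap[i].  the code below converts between the two views
 * with exactly that arithmetic.
 */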

struct uvm_pagerops ubc_pager =
{
        NULL,           /* init */
        NULL,           /* reference */
        NULL,           /* detach */
        ubc_fault,      /* fault */
        /* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winsize = UBC_WINSIZE;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP FALSE
#endif
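
/*
 * on platforms that define PMAP_PREFER (i.e. those with virtually
 * indexed caches), ubc_init() below sizes ubc_nqueues from the pmap's
 * preferred alignment so that each inactive queue holds only windows
 * at a single cache alignment.  on other platforms one queue is enough.
 */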

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
        struct ubc_map *umap;
        vaddr_t va;
        int i;

        /*
         * init ubc_object.
         * alloc and init ubc_map's.
         * init inactive queues.
         * alloc and init hashtable.
         * map in ubc_object.
         */

        simple_lock_init(&ubc_object.uobj.vmobjlock);
        ubc_object.uobj.pgops = &ubc_pager;
        TAILQ_INIT(&ubc_object.uobj.memq);
        ubc_object.uobj.uo_npages = 0;
        ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

        ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
                                 M_TEMP, M_NOWAIT);
        if (ubc_object.umap == NULL)
                panic("ubc_init: failed to allocate ubc_map");
        bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));

        va = (vaddr_t)1L;
#ifdef PMAP_PREFER
        PMAP_PREFER(0, &va);
        if (va < ubc_winsize) {
                va = ubc_winsize;
        }
        ubc_nqueues = va / ubc_winsize;
        if (ubc_nqueues != 1) {
                ubc_release_unmap = TRUE;
        }
#endif
        ubc_object.inactive = malloc(UBC_NQUEUES *
                                     sizeof(struct ubc_inactive_head),
                                     M_TEMP, M_NOWAIT);
        if (ubc_object.inactive == NULL)
                panic("ubc_init: failed to allocate inactive queue heads");
        for (i = 0; i < UBC_NQUEUES; i++) {
                TAILQ_INIT(&ubc_object.inactive[i]);
        }
        for (i = 0; i < ubc_nwins; i++) {
                umap = &ubc_object.umap[i];
                TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
                                  umap, inactive);
        }

        ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
                                   &ubc_object.hashmask);
        if (ubc_object.hash == NULL)
                panic("ubc_init: failed to allocate hash table");
        for (i = 0; i <= ubc_object.hashmask; i++) {
                LIST_INIT(&ubc_object.hash[i]);
        }

        if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
                    ubc_nwins * ubc_winsize, &ubc_object.uobj, 0, (vsize_t)va,
                    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
                                UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
                panic("ubc_init: failed to map ubc_object");
        }
        UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */
static int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
        struct uvm_faultinfo *ufi;
        vaddr_t ign1;
        vm_page_t *ign2;
        int ign3, ign4;
        vm_fault_t fault_type;
        vm_prot_t access_type;
        int flags;
{
        struct uvm_object *uobj;
        struct vnode *vp;
        struct ubc_map *umap;
        vaddr_t va, eva, ubc_offset, slot_offset;
        int i, error, rv, npages;
        struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
        UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

        /*
         * no need to try with PGO_LOCKED...
         * we don't need to have the map locked since we know that
         * no one will mess with it until our reference is released.
         */
        if (flags & PGO_LOCKED) {
#if 0
                return EBUSY;
#else
                uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
                flags &= ~PGO_LOCKED;
#endif
        }

        va = ufi->orig_rvaddr;
        ubc_offset = va - (vaddr_t)ubc_object.kva;

        UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
                    va, ubc_offset, access_type, 0);

        umap = &ubc_object.umap[ubc_offset / ubc_winsize];
        KASSERT(umap->refcount != 0);
        slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));

        /* no umap locking needed since we have a ref on the umap */
        uobj = umap->uobj;
        vp = (struct vnode *)uobj;
        KASSERT(uobj != NULL);

        npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;

        /*
         * XXXUBC
         * if npages is more than 1 we have to be sure that
         * we set PGO_OVERWRITE correctly.
         */
        if (access_type == VM_PROT_WRITE) {
                npages = 1;
        }

again:
        memset(pgs, 0, sizeof (pgs));
        simple_lock(&uobj->vmobjlock);

        UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
                    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
                    vp->v_uvm.u_size);

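        /*
         * set PGO_OVERWRITE if the page being faulted starts at or after
         * the offset that ubc_alloc() was told will be overwritten and
         * either ends within that range or reaches the end of the file;
         * in both cases the current contents need not be read in.
         */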
        if (access_type & VM_PROT_WRITE &&
            slot_offset >= umap->writeoff &&
            (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
             slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
                UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
                flags |= PGO_OVERWRITE;
        } else {
                UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
        }
        /* XXX be sure to zero any part of the page past EOF */

        /*
         * XXX
         * ideally we'd like to pre-fault all of the pages we're overwriting.
         * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
         * pages in [writeoff, writeoff+writesize] instead of just the one.
         */

        UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
                    uobj, umap->offset + slot_offset, npages, 0);

        error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
                             access_type, 0, flags);
        UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages,0,0);

        if (error == EAGAIN) {
                tsleep(&lbolt, PVM, "ubc_fault", 0);
                goto again;
        }
        if (error) {
                return EIO;
        }
        if (npages == 0) {
                return 0;
        }

        va = ufi->orig_rvaddr;
        eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

        UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0,0);
        simple_lock(&uobj->vmobjlock);
        for (i = 0; va < eva; i++, va += PAGE_SIZE) {
                UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
                pg = pgs[i];

                if (pg == NULL || pg == PGO_DONTCARE) {
                        continue;
                }
                if (pg->flags & PG_WANTED) {
                        wakeup(pg);
                }
                KASSERT((pg->flags & PG_FAKE) == 0);
                if (pg->flags & PG_RELEASED) {
                        rv = uobj->pgops->pgo_releasepg(pg, NULL);
                        KASSERT(rv);
                        continue;
                }
                KASSERT(access_type == VM_PROT_READ ||
                        (pg->flags & PG_RDONLY) == 0);

                uvm_lock_pageq();
                uvm_pageactivate(pg);
                uvm_unlock_pageq();

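                /*
                 * enter the page into the pmap read/write.  the KASSERT
                 * above ensures we never map a PG_RDONLY page writable
                 * here, and mapping read/write up front avoids a second
                 * fault when a read of the window is followed by a write.
                 */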
                pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
                           VM_PROT_READ | VM_PROT_WRITE, access_type);

                pg->flags &= ~(PG_BUSY);
                UVM_PAGE_OWN(pg, NULL);
        }
        simple_unlock(&uobj->vmobjlock);
        return 0;
}

/*
 * local functions
 */

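/*
 * ubc_find_mapping: find the window mapping (uobj, offset), if any.
 * called with ubc_object.uobj.vmobjlock held.
 */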
static struct ubc_map *
ubc_find_mapping(uobj, offset)
        struct uvm_object *uobj;
        voff_t offset;
{
        struct ubc_map *umap;

        LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
                if (umap->uobj == uobj && umap->offset == offset) {
                        return umap;
                }
        }
        return NULL;
}

/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a buffer mapping
 */
void *
ubc_alloc(uobj, offset, lenp, flags)
        struct uvm_object *uobj;
        voff_t offset;
        vsize_t *lenp;
        int flags;
{
        int s;
        vaddr_t slot_offset, va;
        struct ubc_map *umap;
        voff_t umap_offset;
        UVMHIST_FUNC("ubc_alloc");  UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
                    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

        umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
        slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
        *lenp = min(*lenp, ubc_winsize - slot_offset);

        /*
         * the vnode is always locked here, so we don't need to add a ref.
         */

        s = splbio();

again:
        simple_lock(&ubc_object.uobj.vmobjlock);
        umap = ubc_find_mapping(uobj, umap_offset);
        if (umap == NULL) {
                umap = TAILQ_FIRST(UBC_QUEUE(offset));
                if (umap == NULL) {
                        simple_unlock(&ubc_object.uobj.vmobjlock);
                        tsleep(&lbolt, PVM, "ubc_alloc", 0);
                        goto again;
                }

                /*
                 * remove from old hash (if any),
                 * add to new hash.
                 */

                if (umap->uobj != NULL) {
                        LIST_REMOVE(umap, hash);
                }

                umap->uobj = uobj;
                umap->offset = umap_offset;

                LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
                                 umap, hash);

                va = (vaddr_t)(ubc_object.kva +
                               (umap - ubc_object.umap) * ubc_winsize);
                pmap_remove(pmap_kernel(), va, va + ubc_winsize);
        }

        if (umap->refcount == 0) {
                TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
        }

#ifdef DIAGNOSTIC
        if ((flags & UBC_WRITE) &&
            (umap->writeoff || umap->writelen)) {
                panic("ubc_alloc: concurrent writes vp %p", uobj);
        }
#endif
        if (flags & UBC_WRITE) {
                umap->writeoff = slot_offset;
                umap->writelen = *lenp;
        }

        umap->refcount++;
        simple_unlock(&ubc_object.uobj.vmobjlock);
        splx(s);
        UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
                    umap, umap->refcount,
                    ubc_object.kva + (umap - ubc_object.umap) * ubc_winsize, 0);

        return ubc_object.kva +
                (umap - ubc_object.umap) * ubc_winsize + slot_offset;
}
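
/*
 * a typical caller brackets a uiomove() into or out of the window
 * with ubc_alloc() and ubc_release().  a minimal sketch (not code
 * from this file; "vp", "uio" and "writing" are assumed to come from
 * a vnode read/write path holding the vnode lock):
 *
 *	vsize_t bytes;
 *	void *win;
 *	int error;
 *
 *	bytes = min(uio->uio_resid, vp->v_uvm.u_size - uio->uio_offset);
 *	win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset, &bytes,
 *	    writing ? UBC_WRITE : 0);
 *	error = uiomove(win, bytes, uio);
 *	ubc_release(win, 0);
 *
 * since ubc_alloc() may shorten *lenp to what fits in one window,
 * callers loop over this sequence until the transfer is done.
 */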

void
ubc_release(va, wlen)
        void *va;
        vsize_t wlen;
{
        struct ubc_map *umap;
        struct uvm_object *uobj;
        int s;
        UVMHIST_FUNC("ubc_release");  UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "va %p", va,0,0,0);

        s = splbio();
        simple_lock(&ubc_object.uobj.vmobjlock);

        umap = &ubc_object.umap[((char *)va - ubc_object.kva) / ubc_winsize];
        uobj = umap->uobj;
        KASSERT(uobj != NULL);

        umap->writeoff = 0;
        umap->writelen = 0;
        umap->refcount--;
        if (umap->refcount == 0) {
                if (UBC_RELEASE_UNMAP &&
                    (((struct vnode *)uobj)->v_flag & VTEXT)) {
                        vaddr_t va;

                        /*
                         * if this file is the executable image of
                         * some process, that process will likely have
                         * the file mapped at an alignment other than
                         * what PMAP_PREFER() would like.  we'd like
                         * to have process text be able to use the
                         * cache even if someone is also reading the
                         * file, so invalidate mappings of such files
                         * as soon as possible.
                         */

                        va = (vaddr_t)(ubc_object.kva +
                                       (umap - ubc_object.umap) * ubc_winsize);
                        pmap_remove(pmap_kernel(), va, va + ubc_winsize);
                        LIST_REMOVE(umap, hash);
                        umap->uobj = NULL;
                        TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
                                          inactive);
                } else {
                        TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
                                          inactive);
                }
        }
        UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount,0,0);
        simple_unlock(&ubc_object.uobj.vmobjlock);
        splx(s);
}

/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */
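
/*
 * note: an "end" of 0 means no upper bound; the "end != 0" test in
 * the loop below then skips the upper-bound check, so every window
 * of the object from "start" onward is flushed.
 */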

void
ubc_flush(uobj, start, end)
        struct uvm_object *uobj;
        voff_t start, end;
{
        struct ubc_map *umap;
        vaddr_t va;
        int s;
        UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
                    uobj, start, end, 0);

        s = splbio();
        simple_lock(&ubc_object.uobj.vmobjlock);
        for (umap = ubc_object.umap;
             umap < &ubc_object.umap[ubc_nwins];
             umap++) {

                if (umap->uobj != uobj ||
                    umap->offset < start ||
                    (umap->offset >= end && end != 0) ||
                    umap->refcount > 0) {
                        continue;
                }

                /*
                 * remove from hash,
                 * move to head of inactive queue.
                 */

                va = (vaddr_t)(ubc_object.kva +
                               (umap - ubc_object.umap) * ubc_winsize);
                pmap_remove(pmap_kernel(), va, va + ubc_winsize);

                LIST_REMOVE(umap, hash);
                umap->uobj = NULL;
                TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
                TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
        }
        simple_unlock(&ubc_object.uobj.vmobjlock);
        splx(s);
}