/*	$NetBSD: uvm_bio.c,v 1.2 2000/11/27 08:43:40 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
			       vm_page_t *, int, int, vm_fault_t, vm_prot_t,
			       int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) (((((u_long)(uobj)) >> 8) + \
				 (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) / UBC_WINSIZE) & \
					(UBC_NQUEUES - 1)])
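
/*
 * UBC_HASH folds the object pointer and the page index of the file offset
 * into an index into ubc_object.hash; UBC_QUEUE picks a window's inactive
 * queue based on which cache-alignment bucket its offset falls in
 * (there is only one queue unless PMAP_PREFER is defined, see below).
 */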

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	int			refcount;	/* refcount on mapping */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winsize = UBC_WINSIZE;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP FALSE
#endif

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));

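	/*
	 * if the pmap has virtual-cache aliasing constraints, PMAP_PREFER
	 * reports the alignment stride it wants; one inactive queue is
	 * created per window-sized slice of that stride so that recycled
	 * windows keep a cache-friendly alignment.
	 */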
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	if (va < UBC_WINSIZE) {
		va = UBC_WINSIZE;
	}
	ubc_nqueues = va / UBC_WINSIZE;
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_object.inactive = malloc(UBC_NQUEUES *
				     sizeof(struct ubc_inactive_head),
				     M_TEMP, M_NOWAIT);
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins * UBC_WINSIZE, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE))
	    != KERN_SUCCESS) {
		panic("ubc_init: failed to map ubc_object\n");
	}
	UVMHIST_INIT(ubchist, 300);
}


/*
 * ubc_fault: fault routine for ubc mapping
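 *
 * a fault on a window translates to a fault on the underlying vnode:
 * look up the ubc_map that owns the faulting address, call VOP_GETPAGES()
 * to bring the vnode's pages in, and enter kernel mappings for them.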
 */
static int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	vm_page_t *ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, rv, npages;
	struct vm_page *pgs[UBC_WINSIZE >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */
	if (flags & PGO_LOCKED) {
#if 0
		return VM_PAGER_UNLOCK;
#else
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
#endif
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
		    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset / UBC_WINSIZE];
	KASSERT(umap->refcount != 0);
	slot_offset = trunc_page(ubc_offset & (UBC_WINSIZE - 1));

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(uobj != NULL);

	npages = (UBC_WINSIZE - slot_offset) >> PAGE_SHIFT;

	/*
	 * XXXUBC
	 * if npages is more than 1 we have to be sure that
	 * we set PGO_OVERWRITE correctly.
	 */
	if (access_type == VM_PROT_WRITE) {
		npages = 1;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
		    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
		    vp->v_uvm.u_size);

	if (access_type & VM_PROT_WRITE &&
	    slot_offset >= umap->writeoff &&
	    (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
	     slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
		UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
		flags |= PGO_OVERWRITE;
	} else {
		UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
	}
	/* XXX be sure to zero any part of the page past EOF */

	/*
	 * XXX
	 * ideally we'd like to pre-fault all of the pages we're overwriting.
	 * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
	 * pages in [writeoff, writeoff+writesize] instead of just the one.
	 */

	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
		    uobj, umap->offset + slot_offset, npages, 0);

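	/*
	 * ask the vnode pager for the pages; this can sleep doing i/o,
	 * which is why the fault runs unlocked (see above).
	 */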
	rv = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
			  access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages rv %d npages %d", rv, npages,0,0);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_AGAIN:
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;

	default:
		return rv;
	}

	if (npages == 0) {
		return VM_PAGER_OK;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

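	/*
	 * for each page returned, wake any waiters, activate the page
	 * and enter a kernel mapping for it at the faulting address.
	 */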
	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0,0);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			rv = uobj->pgops->pgo_releasepg(pg, NULL);
			KASSERT(rv);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
			(pg->flags & PG_RDONLY) == 0);

		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();

		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
			   VM_PROT_ALL, access_type);

		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	simple_unlock(&uobj->vmobjlock);
	return VM_PAGER_OK;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
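
/*
 * a sketch of the typical caller, loosely following the file system
 * read/write paths of this era (the file_size variable and the UBC_READ
 * flag are illustrative assumptions; error handling is omitted):
 *
 *	while (uio->uio_resid > 0) {
 *		bytelen = min(file_size - uio->uio_offset, uio->uio_resid);
 *		win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset,
 *		    &bytelen, UBC_READ);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * ubc_alloc() may shorten *lenp to the end of the window, so callers
 * must loop until the transfer is complete.
 */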

/*
 * ubc_alloc: allocate a buffer mapping
 */
void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	int s;
	vaddr_t umap_offset, slot_offset, va;
	struct ubc_map *umap;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
		    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

	umap_offset = (vaddr_t)(offset & ~((voff_t)UBC_WINSIZE - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)UBC_WINSIZE - 1));
	*lenp = min(*lenp, UBC_WINSIZE - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

	s = splbio();

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any),
		 * add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}

		umap->uobj = uobj;
		umap->offset = umap_offset;

		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
				 umap, hash);

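		/*
		 * the window is being recycled for a new object/offset,
		 * so throw away any stale translations left in the pmap.
		 */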
		va = (vaddr_t)(ubc_object.kva +
		    (umap - ubc_object.umap) * UBC_WINSIZE);
		pmap_remove(pmap_kernel(), va, va + UBC_WINSIZE);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) &&
	    (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
		    umap, umap->refcount,
		    ubc_object.kva + (umap - ubc_object.umap) * UBC_WINSIZE, 0);

	return ubc_object.kva +
	    (umap - ubc_object.umap) * UBC_WINSIZE + slot_offset;
}


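/*
 * ubc_release: release a mapping window acquired with ubc_alloc().
 */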
void
ubc_release(va, wlen)
	void *va;
	vsize_t wlen;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	int s;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va,0,0,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) / UBC_WINSIZE];
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {
			vaddr_t va;

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			va = (vaddr_t)(ubc_object.kva +
			    (umap - ubc_object.umap) * UBC_WINSIZE);
			pmap_remove(pmap_kernel(), va, va + UBC_WINSIZE);
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
					  inactive);
		} else {
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
					  inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount,0,0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}


/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	int s;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj ||
		    umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    (umap - ubc_object.umap) * UBC_WINSIZE);
		pmap_remove(pmap_kernel(), va, va + UBC_WINSIZE);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}