/*	$NetBSD: uvm_bio.c,v 1.33 2005/01/09 16:42:44 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.33 2005/01/09 16:42:44 chs Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **, int,
	    int, vm_fault_t, vm_prot_t, int);
struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

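/*
 * UBC_HASH hashes an (object, offset) pair to a bucket by mixing the
 * object pointer with the page index of the offset; UBC_QUEUE selects
 * one of the inactive queues based on the window-aligned offset; and
 * UBC_UMAP_ADDR converts a ubc_map pointer back into the kernel
 * virtual address of its mapping window.
 */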
#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
	    ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
	    (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

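/*
 * UMAP_PAGES_LOCKED: the window's pages were faulted in up front by
 * ubc_alloc(UBC_FAULTBUSY) and are still busy.
 * UMAP_MAPPING_CACHED: the window still has valid pmap mappings left
 * over from a previous use of the same (uobj, offset) pair.
 */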
#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

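	/*
	 * on pmaps with virtual cache aliasing constraints, PMAP_PREFER
	 * reports the alias stride; use one inactive queue per alias
	 * "color" so that recycled windows keep cache-compatible
	 * virtual addresses.
	 */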
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

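	/*
	 * map the whole array of windows into the kernel address space
	 * as one contiguous chunk; faults on it are handled by
	 * ubc_fault() via ubc_pager.
	 */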
	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	struct vm_page **ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
	    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

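	/*
	 * for a read fault, get pages from the faulting address to the
	 * end of the window.  for a write fault, cover everything up to
	 * the end of the region being written (rounded up to a page
	 * boundary) and allow pages to be allocated past EOF.
	 */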
	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_pagefree(pg);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				uvm_unlock_pageq();
				pg = uvm_loanbreak(pg);
				uvm_lock_pageq();
				if (pg == NULL)
					continue; /* will re-fault */
			}
		}
		KASSERT(access_type == VM_PROT_READ ||
		    (pg->flags & PG_RDONLY) == 0);
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    (pg->flags & PG_RDONLY) ? prot & ~VM_PROT_WRITE : prot,
		    access_type);
		uvm_pageactivate(pg);
		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

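/*
 * a typical consumer (a hypothetical sketch, not code from this file)
 * maps a window over the file range, copies data through it with
 * uiomove() and then releases it:
 *
 *	while (uio->uio_resid > 0) {
 *		vsize_t bytelen = uio->uio_resid;
 *		void *win = ubc_alloc(uobj, uio->uio_offset, &bytelen,
 *		    UBC_WRITE);
 *
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * ubc_alloc() may shorten *lenp to what fits in one window, so callers
 * must loop until the whole transfer is done.
 */
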
/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
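		/*
		 * no cached mapping for this (uobj, offset) pair:
		 * recycle the window at the head of the inactive queue,
		 * or wait for a window to become inactive if they are
		 * all in use.
		 */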
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

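	/*
	 * UBC_FAULTBUSY: fault in and map all the pages covering the
	 * write up front, leaving them busy; ubc_release() will unbusy
	 * them and enter them on the page queues.
	 */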
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags = PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, 0, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(va, flags)
	void *va;
	int flags;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	boolean_t unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

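	/*
	 * if the window was faulted in with UBC_FAULTBUSY, zero the
	 * unwritten tail of the last page, mark the pages dirty, remove
	 * the kernel mappings and unbusy the pages.
	 */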
	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		boolean_t rv;

		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */