/*	$NetBSD: uvm_bio.c,v 1.6 2000/12/27 09:01:45 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
			       vm_page_t *, int, int, vm_fault_t, vm_prot_t,
			       int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) (((((u_long)(uobj)) >> 8) + \
				 (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) / ubc_winsize) & \
					       (UBC_NQUEUES - 1)])

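/*
 * a worked example (illustrative values only, assuming PAGE_SHIFT is
 * 12 and ubc_winsize is 0x2000): for the window of the uobj at
 * 0xc01f4000 covering file offset 0x6000, UBC_HASH computes
 * ((0xc01f4000 >> 8) + (0x6000 >> 12)) & hashmask, i.e.
 * (0xc01f40 + 6) & hashmask, and UBC_QUEUE(0x6000) selects inactive
 * queue (0x6000 / 0x2000) & (UBC_NQUEUES - 1), i.e. queue 3 modulo
 * the queue count, so consecutive windows of a file rotate through
 * the inactive queues.
 */
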
struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	int			refcount;	/* refcount on mapping */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
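
/*
 * invariants that can be read out of the code below: a umap is on a
 * hash chain whenever umap->uobj != NULL, and on one of the inactive
 * queues whenever umap->refcount == 0.  the two are not exclusive:
 * an unreferenced window keeps its hash entry (and its pmap entries)
 * so that a later ubc_alloc() of the same window can reuse it without
 * another pmap_remove() and refault.
 */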

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;
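
/*
 * the kva layout is implicit in the arithmetic used throughout this
 * file: window i (ubc_object.umap[i]) is always mapped at
 * ubc_object.kva + i * ubc_winsize, so a umap and its virtual address
 * can each be recovered from the other with one multiply or divide.
 */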

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winsize = UBC_WINSIZE;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP FALSE
#endif

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	if (va < ubc_winsize) {
		va = ubc_winsize;
	}
	ubc_nqueues = va / ubc_winsize;
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
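
	/*
	 * illustrative numbers (not defaults from this file): if
	 * PMAP_PREFER() reports a 64kB cache alignment and ubc_winsize
	 * is 8kB, ubc_nqueues becomes 8 and windows are handed out
	 * from 8 rotating inactive queues, spreading the windows in
	 * use across all the cache colors.
	 */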
	ubc_object.inactive = malloc(UBC_NQUEUES *
				     sizeof(struct ubc_inactive_head),
				     M_TEMP, M_NOWAIT);
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

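	/*
	 * note: the "va" computed above appears to be passed to
	 * uvm_map() below as its alignment argument, so on PMAP_PREFER
	 * platforms the window array itself starts cache-aligned.
	 */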
	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins * ubc_winsize, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE))
	    != KERN_SUCCESS) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}


/*
 * ubc_fault: fault routine for ubc mapping
 */
static int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	vm_page_t *ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, rv, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * we don't do PGO_LOCKED (fast-path) faults: drop all the
	 * locks and take the slow path.  it's safe to unlock here
	 * because the caller holds a reference on the umap, so no one
	 * will tear the mapping down until that reference is released.
	 */
	if (flags & PGO_LOCKED) {
#if 0
		return VM_PAGER_UNLOCK;
#else
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
#endif
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
		    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset / ubc_winsize];
	KASSERT(umap->refcount != 0);
	slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));
	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(uobj != NULL);

	npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;

	/*
	 * XXXUBC
	 * if npages is more than 1 we have to be sure that
	 * we set PGO_OVERWRITE correctly.
	 */
	if (access_type == VM_PROT_WRITE) {
		npages = 1;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
		    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
		    vp->v_uvm.u_size);

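	/*
	 * the test below decides whether the pager may skip reading
	 * this page from disk (PGO_OVERWRITE): the fault must be a
	 * write, the page must start at or after the offset that
	 * ubc_alloc() was told would be overwritten, and the page must
	 * either lie entirely within that overwrite region or extend
	 * to (or past) EOF, so that any part not overwritten is past
	 * the end of the file anyway.
	 */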
	if ((access_type & VM_PROT_WRITE) &&
	    slot_offset >= umap->writeoff &&
	    (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
	     slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
		UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
		flags |= PGO_OVERWRITE;
	} else {
		UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
	}
	/* XXX be sure to zero any part of the page past EOF */

	/*
	 * XXX
	 * ideally we'd like to pre-fault all of the pages we're overwriting.
	 * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
	 * pages in [writeoff, writeoff + writelen] instead of just the one.
	 */

	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
		    uobj, umap->offset + slot_offset, npages, 0);

	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
	    access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0, 0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return VM_PAGER_ERROR;
	}
	if (npages == 0) {
		return VM_PAGER_OK;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			rv = uobj->pgops->pgo_releasepg(pg, NULL);
			KASSERT(rv);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
			(pg->flags & PG_RDONLY) == 0);

		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();

		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
			   VM_PROT_READ | VM_PROT_WRITE, access_type);

		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	simple_unlock(&uobj->vmobjlock);
	return VM_PAGER_OK;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */
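
/*
 * a sketch of the intended calling sequence (everything here other
 * than ubc_alloc/ubc_release/UBC_WRITE comes from elsewhere in the
 * tree, so treat the details as illustrative):
 *
 *	while (uio->uio_resid != 0 && error == 0) {
 *		vsize_t bytes = uio->uio_resid;
 *		void *win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset,
 *		    &bytes, UBC_WRITE);
 *
 *		error = uiomove(win, bytes, uio);
 *		ubc_release(win, 0);
 *	}
 *
 * ubc_alloc() trims "bytes" to what fits in one window and returns a
 * kernel virtual address inside that window; the actual i/o happens
 * in ubc_fault() when uiomove() touches the mapping.
 */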

/*
 * ubc_alloc: allocate a buffer mapping
 */
void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	int s;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
		    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = min(*lenp, ubc_winsize - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

	s = splbio();

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any),
		 * add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}

		umap->uobj = uobj;
		umap->offset = umap_offset;

		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
				 umap, hash);

		va = (vaddr_t)(ubc_object.kva +
			       (umap - ubc_object.umap) * ubc_winsize);
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) &&
	    (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
		    umap, umap->refcount,
		    ubc_object.kva + (umap - ubc_object.umap) * ubc_winsize, 0);

	return ubc_object.kva +
		(umap - ubc_object.umap) * ubc_winsize + slot_offset;
}


void
ubc_release(va, wlen)
	void *va;
	vsize_t wlen;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	int s;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) / ubc_winsize];
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {
			vaddr_t va;

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			va = (vaddr_t)(ubc_object.kva +
			    (umap - ubc_object.umap) * ubc_winsize);
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}


/*
 * ubc_flush: remove any mappings of the given range of the object
 * from the ubc mapping cache.  an "end" of 0 means everything from
 * "start" to the end of the object.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	int s;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end, 0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj ||
		    umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
			       (umap - ubc_object.umap) * ubc_winsize);
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}
    546