/*	$NetBSD: uvm_bio.c,v 1.7 2001/02/02 01:55:52 enami Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */
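
/*
 * typical usage of this interface (a sketch, not code from this file):
 * a filesystem's write path maps a window over the vnode's pages with
 * ubc_alloc(), copies user data into it with uiomove(), and unmaps the
 * window with ubc_release().  error handling and chunking details are
 * elided, and this assumes the era's struct vnode embeds its uvm_object
 * as v_uvm.u_obj:
 *
 *	while (uio->uio_resid != 0) {
 *		bytelen = uio->uio_resid;
 *		win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset,
 *		    &bytelen, UBC_WRITE);
 *		error = uiomove(win, bytelen, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * note that ubc_alloc() trims bytelen to the end of the current
 * window, so the loop advances one window (or less) per iteration.
 */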


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
			       vm_page_t *, int, int, vm_fault_t, vm_prot_t,
			       int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) (((((u_long)(uobj)) >> 8) + \
				 (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) / ubc_winsize) & \
					       (UBC_NQUEUES - 1)])
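
/*
 * UBC_HASH maps an (object, offset) pair to a chain of the mapping
 * hash table used by ubc_find_mapping().  UBC_QUEUE maps a file offset
 * to one of the inactive queues: with PMAP_PREFER there is one queue
 * per window "color", so a window reused for a given offset tends to
 * already sit at a cache-friendly virtual address.
 */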

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	int			refcount;	/* refcount on mapping */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
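
/*
 * a ubc_map describes one mapping window.  a window with a zero
 * refcount sits on an inactive queue but stays in the hash table, so a
 * later ubc_alloc() of the same (object, offset) can reclaim it without
 * remapping; it leaves the hash only when the window is recycled for a
 * different range or flushed.
 */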

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};
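
/*
 * tunables: the number of mapping windows and the size of each window.
 * together they determine the total kva reserved for ubc mappings,
 * ubc_nwins * ubc_winsize bytes.
 */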
int ubc_nwins = UBC_NWINS;
int ubc_winsize = UBC_WINSIZE;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP FALSE
#endif

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	if (va < ubc_winsize) {
		va = ubc_winsize;
	}
	ubc_nqueues = va / ubc_winsize;
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_object.inactive = malloc(UBC_NQUEUES *
				     sizeof(struct ubc_inactive_head),
				     M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	if (ubc_object.hash == NULL)
		panic("ubc_init: failed to allocate hash table");
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins * ubc_winsize, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE))
	    != KERN_SUCCESS) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}


/*
 * ubc_fault: fault routine for ubc mapping
 */
static int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	vm_page_t *ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, rv, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */
	if (flags & PGO_LOCKED) {
#if 0
		return VM_PAGER_UNLOCK;
#else
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
#endif
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
		    va, ubc_offset, access_type,0);

	umap = &ubc_object.umap[ubc_offset / ubc_winsize];
	KASSERT(umap->refcount != 0);
	slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(uobj != NULL);

	npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;

	/*
	 * XXXUBC
	 * if npages is more than 1 we have to be sure that
	 * we set PGO_OVERWRITE correctly.
	 */
	if (access_type == VM_PROT_WRITE) {
		npages = 1;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
		    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
		    vp->v_uvm.u_size);
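
	/*
	 * if this is a write fault and the faulting page will be
	 * entirely overwritten (it lies wholly within the pending
	 * write, or reaches past EOF, where the remainder is expected
	 * to be zeroed), set PGO_OVERWRITE so the pager can skip
	 * reading the old contents from disk.
	 */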
	if (access_type & VM_PROT_WRITE &&
	    slot_offset >= umap->writeoff &&
	    (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
	     slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
		UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
		flags |= PGO_OVERWRITE;
	} else {
		UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
	}
	/* XXX be sure to zero any part of the page past EOF */

	/*
	 * XXX
	 * ideally we'd like to pre-fault all of the pages we're overwriting.
	 * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
	 * pages in [writeoff, writeoff+writesize] instead of just the one.
	 */

	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
		    uobj, umap->offset + slot_offset, npages, 0);

	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
	    access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages,0,0);

	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return VM_PAGER_ERROR;
	}
	if (npages == 0) {
		return VM_PAGER_OK;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0,0);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			rv = uobj->pgops->pgo_releasepg(pg, NULL);
			KASSERT(rv);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
			(pg->flags & PG_RDONLY) == 0);

		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
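
		/*
		 * enter the mapping with both read and write permission
		 * (not just access_type), so a later access of the other
		 * type through this window can reuse the mapping;
		 * access_type tells the pmap which access is happening
		 * right now.
		 */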
		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
			   VM_PROT_READ | VM_PROT_WRITE, access_type);

		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	simple_unlock(&uobj->vmobjlock);
	return VM_PAGER_OK;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc:  allocate a buffer mapping
 */
void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	int s;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
		    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = min(*lenp, ubc_winsize - slot_offset);

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

	s = splbio();

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any),
		 * add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}

		umap->uobj = uobj;
		umap->offset = umap_offset;

		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
				 umap, hash);

		va = (vaddr_t)(ubc_object.kva +
			       (umap - ubc_object.umap) * ubc_winsize);
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) &&
	    (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
		    umap, umap->refcount,
		    ubc_object.kva + (umap - ubc_object.umap) * ubc_winsize,0);

	return ubc_object.kva +
		(umap - ubc_object.umap) * ubc_winsize + slot_offset;
}


void
ubc_release(va, wlen)
	void *va;
	vsize_t wlen;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	int s;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va,0,0,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) / ubc_winsize];
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {
			vaddr_t va;

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			va = (vaddr_t)(ubc_object.kva +
			    (umap - ubc_object.umap) * ubc_winsize);
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount,0,0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}


/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 * an end of 0 means "no upper bound", i.e. flush everything from
 * start to the end of the object.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	int s;
	UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj ||
		    umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
			       (umap - ubc_object.umap) * ubc_winsize);
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}