/*	$NetBSD: uvm_vnode.c,v 1.97.2.8 2012/10/30 17:23:03 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.8 2012/10/30 17:23:03 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
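	/*
	 * The uvm_object here is expected to be the v_uobj embedded at
	 * the head of a struct vnode, so the cast recovers the owning
	 * vnode and vref() takes an additional hold on it.
	 */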
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
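	/*
	 * Drop the hold taken in uvn_reference().  Releasing the last
	 * vnode reference may hand the vnode to the inactivation or
	 * reclaim path, which flushes the object's remaining pages.
	 */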
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
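
/*
 * A sketch of the usual call pattern (illustrative, not code from this
 * file): pager ops are reached through the object's pgops table, and
 * the locking is asymmetric -- the caller locks, VOP_PUTPAGES()
 * unlocks.  A synchronous flush-and-free of a whole object might look
 * like:
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_put)(uobj, 0, 0,
 *	    PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO);
 *	(the object lock has already been released at this point)
 */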


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

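	/*
	 * Queue read-ahead only for plain reads of regular files when
	 * we are allowed to sleep; write faults and PGO_LOCKED
	 * (non-sleeping) requests go straight to VOP_GETPAGES().
	 */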
	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
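
/*
 * A sketch of a typical pgo_get call (illustrative, not from this
 * file): fetching a single page synchronously.  "pg", "off" and
 * "error" are hypothetical locals.
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *	(the object was unlocked for the I/O, since PGO_LOCKED is not
 *	set; per the NOTE above, the caller must check pg for released
 *	pages)
 */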


/*
 * uvn_findpages:
 * return the pages for the uobj and offsets requested, allocating them
 * if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
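	/*
	 * Walk the requested range one page at a time, in the caller's
	 * chosen direction; "count" records how far the scan got and
	 * "found" how many pages were actually returned.
	 */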
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}

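/*
 * uvn_findpage: find or allocate the page at "offset".
 *
 * => uobj must be locked.
 * => flags (defined in uvm_pager.h, as exercised below):
 *	UFP_NOALLOC	never allocate a missing page
 *	UFP_NOWAIT	never sleep (for memory or for a busy page)
 *	UFP_NOCACHE	accept only freshly allocated pages
 *	UFP_NORDONLY	skip PG_RDONLY pages
 *	UFP_NOPAGER1/UFP_ONLYPAGER1	filter on PG_PAGER1
 *	UFP_DIRTYONLY	stop the scan at the first clean page
 *	UFP_BACKWARD	scan descending offsets
 * => returns 1 with a BUSY page in *pgp, or 0 if the offset was skipped.
 */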
static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DIRTYONLY or DENSE but passed us UFP_DIRTYONLY.
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
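		/*
		 * Unless the fill was dense, the peeked page may lie
		 * beyond the offset we asked for (in the scan
		 * direction); in that case no page exists at "offset"
		 * itself, so treat it as missing.
		 */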
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			pg = NULL;
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				uvm_page_array_clear(a);
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			uvm_page_array_clear(a);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) != 0 &&
		    (pg->flags & PG_RDONLY) != 0) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/*
		 * check for PG_PAGER1 requests
		 */
		if ((flags & UFP_NOPAGER1) != 0 &&
		    (pg->flags & PG_PAGER1) != 0) {
			UVMHIST_LOG(ubchist, "nopager1",0,0,0,0);
			goto skip;
		}
		if ((flags & UFP_ONLYPAGER1) != 0 &&
		    (pg->flags & PG_PAGER1) == 0) {
			UVMHIST_LOG(ubchist, "onlypager1",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);

			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p (color %u)",
		    pg, VM_PGCOLOR_BUCKET(pg), 0,0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}
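
/*
 * A sketch of typical uvn_findpages() usage (illustrative, not from
 * this file), roughly as genfs-style getpages/putpages code would
 * issue it.  The pgs[] slots must be pre-zeroed, since a non-NULL
 * entry tells uvn_findpage() to skip that offset:
 *
 *	struct vm_page *pgs[1];
 *	unsigned int npages = 1;
 *	memset(pgs, 0, sizeof(pgs));
 *	mutex_enter(uobj->vmobjlock);
 *	if (uvn_findpages(uobj, off, &npages, pgs, NULL, UFP_ALL) == 0)
 *		(no page found; npages now holds the scan count)
 *	(pgs[0], if found, is returned BUSY; the lock is still held)
 */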

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

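	/*
	 * Shrinking: push out and free the pages beyond the new end of
	 * file.  uvn_put() comes back with the object unlocked, so the
	 * lock must be retaken before the new size is published.
	 */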
	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}

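/*
 * v_size is the nominal file size, while v_writesize may run ahead of
 * it while delayed writes are outstanding; uvm_vnp_setsize() folds the
 * two back together.  The predicates below report properties of the
 * vnode backing a uvm_object:
 *
 *	uvn_text_p:	the vnode has (or may have had) executable
 *		mappings.
 *	uvn_clean_p:	no dirty pages are queued for the syncer.
 *	uvn_needs_writefault_p:	write mappings must take a fault first,
 *		so the filesystem can note that the vnode is dirty.
 */
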
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}