/*	$NetBSD: uvm_vnode.c,v 1.93.4.3 2011/03/05 20:56:38 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.93.4.3 2011/03/05 20:56:38 rmind Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
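
/*
 * Generic UVM code normally reaches these operations through the
 * pgops pointer in struct uvm_object rather than by calling them
 * directly; for a vnode, pgo_reference ends up in vref() and
 * pgo_detach in vrele().  An illustrative sketch of that dispatch
 * (not code from this file):
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *
 *	(*uobj->pgops->pgo_reference)(uobj);
 *	...
 *	(*uobj->pgops->pgo_detach)(uobj);
 */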

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
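
/*
 * Illustrative use of pgo_put to flush and clean an entire object
 * synchronously (a sketch, assuming the VOP_PUTPAGES semantics where
 * offlo == offhi == 0 plus PGO_ALLPAGES selects the whole object):
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_put)(uobj, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
 *
 * Per the locking rule above, the object is unlocked again on return.
 */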


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
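
/*
 * Illustrative synchronous page-in through pgo_get (a sketch; error
 * handling trimmed).  Without PGO_LOCKED the object is unlocked on
 * return and a successfully returned page comes back PG_BUSY:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), &pg,
 *	    &npages, 0, VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 */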


/*
 * uvn_findpages:
 * find the pages for the uobj and offset range requested, allocating
 * them if needed.  returns the number of pages found and updates
 * *npagesp to the number of slots examined.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
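
/*
 * Illustrative caller pattern (a sketch; MAXPGS and startoff are
 * names local to the example).  Slots must be pre-zeroed, since a
 * non-NULL entry is skipped as "dontcare" by uvn_findpage():
 *
 *	struct vm_page *pgs[MAXPGS];
 *	int npages = MAXPGS;
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	mutex_enter(uobj->vmobjlock);
 *	uvn_findpages(uobj, startoff, &npages, pgs, UFP_ALL);
 *
 * Each non-NULL pgs[i] is now PG_BUSY and owned by the caller, who
 * must eventually clear PG_BUSY and wake any waiters.
 */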

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}
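
/*
 * Illustrative shrink path (a sketch; fs_dealloc_blocks is a
 * hypothetical filesystem helper): a filesystem truncating a file
 * shrinks the uobj first, so pages beyond the new EOF are flushed
 * and freed before the backing blocks go away:
 *
 *	uvm_vnp_setsize(vp, newlen);
 *	error = fs_dealloc_blocks(vp, newlen);
 */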

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}
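
/*
 * v_writesize only ever grows here; the intended sequence when
 * extending a file through the page cache looks roughly like this
 * (a sketch of the pattern, not code from any particular filesystem):
 *
 *	uvm_vnp_setwritesize(vp, newend);
 *	...write or zero the new data through the page cache...
 *	uvm_vnp_setsize(vp, newend);
 *
 * The first call announces the new end of data; the second commits
 * both v_size and v_writesize once the data is in place.
 */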

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
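
/*
 * Example of how fault handling might consult these predicates (a
 * sketch): mapping a page read-only when uvn_needs_writefault_p()
 * is true forces a later write fault, giving the vnode a chance to
 * be put on the syncer worklist before it is first dirtied through
 * the mapping:
 *
 *	if (uobj->pgops == &uvm_vnodeops && uvn_needs_writefault_p(uobj))
 *		prot &= ~VM_PROT_WRITE;
 */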