/*	$NetBSD: uvm_vnode.c,v 1.106 2020/02/23 15:46:43 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.106 2020/02/23 15:46:43 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
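
/*
 * Illustrative sketch, not part of the original source: UVM never calls
 * the uvn_* routines directly; it dispatches through the pgo_* hooks in
 * uvm_vnodeops above.  A synchronous page lookup might look roughly
 * like this (the local variable names are hypothetical):
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *	struct vm_page *pgs[1] = { NULL };
 *	int npages = 1, error;
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, 0);
 *
 * (without PGO_LOCKED the object lock is dropped for the I/O; on
 * success pgs[0] is returned to the caller busy.)
 */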

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
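
/*
 * Illustrative sketch, not part of the original source: a full flush of
 * a vnode's cached pages via the pager op.  With PGO_ALLPAGES the
 * offsets are ignored and the whole object is covered; note that
 * VOP_PUTPAGES() releases vmobjlock before returning:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_put)(uobj, 0, 0,
 *	    PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO);
 */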


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
	    0, 0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
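
/*
 * Illustrative sketch, not part of the original source: callers such as
 * the fault handler typically use a two-phase protocol.  First probe
 * with PGO_LOCKED, which may not sleep; genfs_getpages() reports EBUSY
 * when it cannot satisfy such a request, and the caller then retries
 * the blocking way:
 *
 *	npages = 1;
 *	error = (*uobj->pgops->pgo_get)(uobj, off, pgs, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
 *	if (error == EBUSY) {
 *		npages = 1;
 *		pgs[0] = NULL;
 *		error = (*uobj->pgops->pgo_get)(uobj, off, pgs, &npages,
 *		    0, VM_PROT_READ, UVM_ADV_NORMAL, 0);
 *	}
 */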


/*
 * uvn_findpages:
 * find or create the pages covering *npagesp slots starting at the
 * requested offset, allocating if needed.  returns the number of pages
 * found and sets *npagesp to the number of slots actually examined.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
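
/*
 * Illustrative sketch, not part of the original source: a getpages-style
 * routine can use uvn_findpages() to look up or create a run of busy
 * pages (UFP_ALL is the "no special policy" flag value):
 *
 *	unsigned int npages = npgs;
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	uvn_findpages(uobj, startoff, &npages, pgs, NULL, UFP_ALL);
 *
 * Slots the caller pre-loads with a non-NULL page pointer are treated
 * as "don't care" and skipped.
 */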

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT(uvm_pagelookup(uobj, offset) == NULL
			    || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    radix_tree_get_tag(&uobj->uo_pages,
			    offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
			pg = NULL;
			if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			UVM_UNLOCK_AND_WAIT_RW(pg, uobj->vmobjlock, 0,
					       "uvn_fp2", 0);
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}
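
/*
 * Illustrative note, not part of the original source: the UFP_* flags
 * compose into the common lookup policies, for example:
 *
 *	UFP_ALL				wait and allocate as needed
 *	UFP_NOWAIT | UFP_NOALLOC	opportunistic lookup, never sleeps
 *	UFP_NOCACHE			return only freshly allocated pages,
 *					for full-page overwrites
 *	UFP_DIRTYONLY | UFP_BACKWARD	scan dirty pages back to front,
 *					stopping at the first clean one
 */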

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		/*
		 * shrinking: free the pages past the new EOF.  note that
		 * uvn_put() -> VOP_PUTPAGES() drops vmobjlock, so we must
		 * retake it before updating v_size below.
		 */
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}
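
/*
 * Illustrative sketch, not part of the original source: a filesystem's
 * truncate path is the typical caller.  Once the inode's size has been
 * updated, UVM is told so that pages past the new EOF are freed.  No
 * locks need be held; the function takes vmobjlock and v_interlock
 * itself, the caller only needs a vnode reference:
 *
 *	error = fs_truncate_inode(vp, length);	// hypothetical helper
 *	if (error == 0)
 *		uvm_vnp_setsize(vp, length);
 */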

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}
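
/*
 * Illustrative sketch, not part of the original source, loosely after
 * the ffs write path: an extending write announces the eventual size
 * first, so that pages between v_size and v_writesize can be flushed
 * with the correct length, and commits it once the copy succeeds:
 *
 *	uvm_vnp_setwritesize(vp, newsize);
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice, ubc_flags);
 *	if (error == 0)
 *		uvm_vnp_setsize(vp, newsize);
 */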

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}

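/*
 * Illustrative note, not part of the original source: the fault path
 * consults uvn_needs_writefault_p() (through UVM_OBJ_NEEDS_WRITEFAULT())
 * to decide whether a writable mapping should initially be entered
 * read-only, so that the first write is taken as a fault and the vnode
 * can be moved onto the syncer worklist before being dirtied.
 */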

static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}

	/*
	 * drop the lock while allocating, then re-check v_ractx: another
	 * thread may have installed a context while we slept.  if so,
	 * free the one we allocated.
	 */
	rw_exit(uobj->vmobjlock);
	ra = uvm_ra_allocctx();
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (ra != NULL && vp->v_ractx == NULL) {
		vp->v_ractx = ra;
		ra = NULL;
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}