/*	$NetBSD: uvm_vnode.c,v 1.97.2.8 2012/10/30 17:23:03 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.8 2012/10/30 17:23:03 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			unsigned int, struct uvm_page_array *a,
			unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
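
/*
 * Illustrative sketch (not a consumer in this file): generic UVM code
 * reaches the vnode pager only indirectly, through the ops table above.
 * Flushing part of a vnode-backed object, for example, would look roughly
 * like this, assuming "uobj", "lo" and "hi" come from the caller:
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_put)(uobj, lo, hi, PGO_CLEANIT);
 *	(pgo_put, i.e. uvn_put, returns with the object unlocked)
 */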

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}
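
/*
 * Sketch of how the two ops above pair up in a hypothetical caller:
 * duplicating a mapping of a vnode-backed object takes a reference and
 * tearing the mapping down drops it, so VM references ride the vnode's
 * usecount:
 *
 *	(*uobj->pgops->pgo_reference)(uobj);	(vref() under the hood)
 *	...
 *	(*uobj->pgops->pgo_detach)(uobj);	(vrele() under the hood)
 */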

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
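
/*
 * Minimal caller-side sketch of the locking contract above (hypothetical
 * code; "vp" is any vnode with pages to flush).  The interlock is taken
 * by the caller and released inside VOP_PUTPAGES, so no unlock follows:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES | PGO_CLEANIT);
 *	(vp->v_interlock is no longer held here)
 */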


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
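
/*
 * Sketch of a synchronous single-page read through the pager (hypothetical
 * caller; the real consumers are the fault handler and genfs/ubc code).
 * Without PGO_LOCKED the object is unlocked for the duration of any I/O
 * and comes back unlocked:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(off), &pg,
 *	    &npages, 0, VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 */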


/*
 * uvn_findpages:
 *  return the pages for the uobj at the offsets requested, allocating if
 *  needed; returns the number of pages found.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
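
/*
 * Usage sketch for uvn_findpages (hypothetical caller, in the style of
 * genfs).  The object must be locked, wanted slots must be NULL on entry,
 * and any page handed back is PG_BUSY and must be unbusied by the caller:
 *
 *	struct vm_page *pgs[1] = { NULL };
 *	unsigned int npages = 1;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	if (uvn_findpages(uobj, off, &npages, pgs, NULL, UFP_ALL) != 0) {
 *		(use pgs[0], then clear PG_BUSY/PG_WANTED, wake waiters)
 *	}
 *	mutex_exit(uobj->vmobjlock);
 */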

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller
		 * of uvn_findpages.  in that case, the fillflags used by the
		 * caller might not strictly match ours.
		 * in particular, the caller might have filled the array
		 * without DIRTYONLY or DENSE but passed us UFP_DIRTYONLY.
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			pg = NULL;
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				uvm_page_array_clear(a);
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			uvm_page_array_clear(a);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) != 0 &&
		    (pg->flags & PG_RDONLY) != 0) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/*
		 * check for PG_PAGER1 requests
		 */
		if ((flags & UFP_NOPAGER1) != 0 &&
		    (pg->flags & PG_PAGER1) != 0) {
			UVMHIST_LOG(ubchist, "nopager1",0,0,0,0);
			goto skip;
		}
		if ((flags & UFP_ONLYPAGER1) != 0 &&
		    (pg->flags & PG_PAGER1) == 0) {
			UVMHIST_LOG(ubchist, "onlypager1",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);

			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p (color %u)",
		    pg, VM_PGCOLOR_BUCKET(pg), 0,0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss unneeded pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}
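
/*
 * Hypothetical example of the contract above: a filesystem truncating a
 * 3-page vnode down to 100 bytes would call
 *
 *	uvm_vnp_setsize(vp, 100);
 *
 * which flushes and frees the pages past round_page(100) via uvn_put()
 * before setting v_size and v_writesize; growing a file only updates the
 * size values.
 */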

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}