/*	$NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static void	uvn_markdirty(struct uvm_object *);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
	.pgo_markdirty = uvn_markdirty,
};
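
/*
 * Illustrative sketch: UVM does not call the uvn_* functions directly;
 * it dispatches through the object's pager-ops pointer.  For a
 * vnode-backed object (uobj->pgops == &uvm_vnodeops) a fault handler
 * therefore does, roughly:
 *
 *	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages,
 *	    centeridx, access_type, advice, flags);
 *
 * which lands in uvn_get() below.
 */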

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed-in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
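
/*
 * Hedged usage sketch: a caller flushing a page range enters with the
 * object write-locked and returns with it unlocked, because
 * VOP_PUTPAGES() consumes the lock.  The range bounds here are
 * hypothetical:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_put)(uobj, trunc_page(lo),
 *	    round_page(hi), PGO_CLEANIT | PGO_SYNCIO);
 *	(object lock is now released)
 */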

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
	    0, 0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}

/*
 * uvn_markdirty: called when the object gains its first dirty page
 *
 * => uobj must be write locked.
 */

static void
uvn_markdirty(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	KASSERT(rw_write_held(uobj->vmobjlock));

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	mutex_exit(vp->v_interlock);
}
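
/*
 * Illustrative sketch: this hook is expected to be driven by the page
 * dirtiness tracking code when an object acquires its first dirty
 * page, roughly as below (pseudo-code, not a verbatim call site):
 *
 *	if (object_had_no_dirty_pages &&
 *	    uobj->pgops->pgo_markdirty != NULL)
 *		(*uobj->pgops->pgo_markdirty)(uobj);
 *
 * putting the vnode on the syncer worklist ensures its dirty pages
 * are eventually flushed even if nobody calls fsync().
 */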

/*
 * uvn_findpages:
 * return the page for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
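
/*
 * Hedged usage sketch (hypothetical caller, in the style of the genfs
 * I/O code): look up or allocate the pages backing a request, leaving
 * each returned page BUSY.  Slots in pgs[] must start out NULL, since
 * a non-NULL slot is skipped as "dontcare":
 *
 *	struct vm_page *pgs[16];
 *	unsigned int npages = __arraycount(pgs);
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	uvn_findpages(uobj, startoff, &npages, pgs, NULL, UFP_ALL);
 *	...use pgs[0..npages-1], then unbusy them...
 *	rw_exit(uobj->vmobjlock);
 *
 * on return *npagesp holds the number of slots processed and the
 * return value the number of pages actually found.
 */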

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT(uvm_pagelookup(uobj, offset) == NULL
			    || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    radix_tree_get_tag(&uobj->uo_pages,
				offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
			pg = NULL;
			if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvnfp1");
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2");
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}
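
/*
 * Hedged sketch: a filesystem is expected to call this whenever the
 * file size changes, typically right after updating its own metadata
 * in a truncate or extend path; "ip" and "i_size" here are
 * hypothetical inode fields:
 *
 *	ip->i_size = newsize;
 *	uvm_vnp_setsize(vp, newsize);
 *
 * so that a shrink frees the now out-of-range cached pages above the
 * new end of file.
 */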

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	int iflag;

	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
	iflag = atomic_load_relaxed(&vp->v_iflag);
	return (iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{

	return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG);
}
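
/*
 * Hedged sketch: uvn_clean_p() pairs with uvn_markdirty() above; it
 * inspects the same per-object dirty tag, so a sync path can cheaply
 * skip vnodes with no dirty pages (hypothetical call site; the real
 * sync code carries more state):
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 *	if (uvn_clean_p(&vp->v_uobj)) {
 *		rw_exit(vp->v_uobj.vmobjlock);
 *	} else {
 *		error = VOP_PUTPAGES(vp, 0, 0,
 *		    PGO_ALLPAGES | PGO_CLEANIT);
 *	}
 */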

static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}

	/*
	 * Drop the object lock to allocate a read-ahead context, then
	 * re-check under the reacquired lock: another thread may have
	 * installed one in the meantime, in which case ours is freed.
	 */
	rw_exit(uobj->vmobjlock);
	ra = uvm_ra_allocctx();
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (ra != NULL && vp->v_ractx == NULL) {
		vp->v_ractx = ra;
		ra = NULL;
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}