/*	$NetBSD: uvm_vnode.c,v 1.105 2020/01/15 17:55:45 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.105 2020/01/15 17:55:45 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
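
/*
 * Note (illustrative, not in this file): every vnode embeds a
 * uvm_object, and the vnode cache points that object at this pager
 * when the vnode is set up, roughly:
 *
 *	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
 *
 * so all paging activity on a mapped file is dispatched through the
 * ops above.
 */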

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
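
/*
 * Example (sketch, not part of the pager): flushing and freeing every
 * page of a vnode synchronously through the pager op.  The caller takes
 * the interlock; VOP_PUTPAGES releases it before returning.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = (*vp->v_uobj.pgops->pgo_put)(&vp->v_uobj, 0, 0,
 *	    PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO);
 */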


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
	    0, 0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
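
/*
 * Example (sketch, not part of the pager): a synchronous, unlocked-mode
 * read of a single page through the pager op.  On success the object
 * lock has been dropped and pgs[0] is returned BUSY.
 *
 *	struct vm_page *pgs[1] = { NULL };
 *	int npages = 1;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 */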


/*
 * uvn_findpages:
 * return the pages for the uobj and offsets requested, allocating them
 * if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 * => on return, *npagesp holds the number of slots processed and the
 *    return value is the number of pages found.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
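
/*
 * Example (sketch, not part of the pager): find-or-allocate a run of
 * pages, as genfs-style getpages code does.  Array slots must be NULL
 * on entry; UFP_ALL places no restriction on which pages qualify.
 *
 *	struct vm_page *pgs[8];
 *	unsigned int npages = __arraycount(pgs);
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	mutex_enter(uobj->vmobjlock);
 *	uvn_findpages(uobj, startoff, &npages, pgs, NULL, UFP_ALL);
 *	mutex_exit(uobj->vmobjlock);
 */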

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT(uvm_pagelookup(uobj, offset) == NULL
			    || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    radix_tree_get_tag(&uobj->uo_pages,
			    offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
			pg = NULL;
			if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				uvm_page_array_clear(a);
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			uvm_page_array_clear(a);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}
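
/*
 * Example (sketch, not part of this file): a file system's truncate
 * path typically updates its own size record first and then informs
 * the pager, so that pages past the new end-of-file are tossed:
 *
 *	ip->i_size = newsize;		(illustrative field name)
 *	uvm_vnp_setsize(vp, newsize);
 */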
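/*
 * uvm_vnp_setwritesize: grow the vnode's write size ahead of a write
 * that will extend the file, so that pages past the current v_size can
 * be written back before v_size itself is updated.
 *
 * => the new size must not be smaller than the current v_size.
 */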
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}

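/*
 * uvn_text_p: check whether the vnode is mapped for execution
 * (i.e. it holds program text).
 */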
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

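/*
 * uvn_clean_p: check whether the vnode has no pages queued for
 * cleaning, i.e. it is not on the syncer's worklist.
 */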
bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

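/*
 * uvn_needs_writefault_p: check whether new writable mappings of this
 * vnode should be write-faulted so the first modification is detected:
 * true if the vnode is clean, or write-mapped but not yet marked dirty.
 */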
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
