/*	$NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static void	uvn_markdirty(struct uvm_object *);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
	.pgo_markdirty = uvn_markdirty,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, (int)offset,
	    0, 0);

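	/*
	 * for a read access that will actually do I/O (not PGO_LOCKED),
	 * request read-ahead on regular files.  tmpfs is skipped since it
	 * has no backing store to read ahead from.
	 */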
	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0 && vp->v_tag != VT_TMPFS) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}

/*
 * uvn_markdirty: called when the object gains first dirty page
 *
 * => uobj must be write locked.
 */

static void
uvn_markdirty(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	KASSERT(rw_write_held(uobj->vmobjlock));

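	/*
	 * put the vnode on the syncer worklist so that its dirty pages
	 * are eventually flushed to the backing store.
	 */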
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	mutex_exit(vp->v_interlock);
}

/*
 * uvn_findpages:
 * return the page for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
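	/*
	 * scan the requested range, backwards if UFP_BACKWARD.  with
	 * UFP_DIRTYONLY, stop at the first offset for which no page can
	 * be returned.
	 */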
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	/*
	 * NOBUSY must come with NOWAIT and NOALLOC.  if NOBUSY is
	 * specified, this may be called with a reader lock.
	 */

	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0);
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0);
	KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT(uvm_pagelookup(uobj, offset) == NULL
			    || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    radix_tree_get_tag(&uobj->uo_pages,
			    offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
			pg = NULL;
			if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
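				/*
				 * out of memory: unlock, wait for free
				 * pages and retry.  the cached page array
				 * may be stale once the lock is dropped,
				 * so clear it.
				 */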
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvnfp1");
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
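			/*
			 * the page is busy; sleep until it is released.
			 * uvm_pagewait() drops the object lock, so the
			 * cached page array must be cleared before
			 * retrying.
			 */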
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2");
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		if ((flags & UFP_NOBUSY) == 0) {
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "uvn_findpage");
		}
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
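		/*
		 * toss the pages beyond the new end of file.  note that
		 * uvn_put() (VOP_PUTPAGES) unlocks the object, so it must
		 * be re-locked afterwards.
		 */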
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	int iflag;

	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
	iflag = atomic_load_relaxed(&vp->v_iflag);
	return (iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{

	return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG);
}

static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
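	/*
	 * drop the object lock to allocate a read-ahead context.  another
	 * thread may install one while the lock is dropped; if so, the
	 * context allocated here is freed below.
	 */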
	if (vp->v_ractx == NULL) {
		rw_exit(uobj->vmobjlock);
		ra = uvm_ra_allocctx();
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (ra != NULL && vp->v_ractx == NULL) {
			vp->v_ractx = ra;
			ra = NULL;
		}
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}
