/*	$NetBSD: uvm_vnode.c,v 1.97.2.1 2011/11/02 21:54:01 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.1 2011/11/02 21:54:01 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
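
/*
 * Illustrative sketch (not part of the original file): callers do not
 * normally invoke the uvn_* functions directly; they dispatch through
 * the pgo_* hooks of a uvm_object's pager ops.  For a vnode, uobj->pgops
 * points at uvm_vnodeops above, so a generic reference looks like:
 *
 *	struct uvm_object *uobj = &vp->v_uobj;	// the vnode's object
 *
 *	if (uobj->pgops->pgo_reference != NULL)
 *		(*uobj->pgops->pgo_reference)(uobj);
 */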

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}
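
/*
 * Sketch (illustration only, not from this file): since the vnode pager
 * maps object references straight onto vref()/vrele(), taking and
 * dropping a pager-level reference on a vnode's object is simply:
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *
 *	(*uobj->pgops->pgo_reference)(uobj);	// vref(vp) underneath
 *	... use the object ...
 *	(*uobj->pgops->pgo_detach)(uobj);	// vrele(vp) underneath
 */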

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
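
/*
 * Illustrative caller sketch (an assumption, not from this file):
 * pgo_put expects the object lock held on entry, and VOP_PUTPAGES
 * drops it, so flushing a byte range [lo, hi) of a vnode synchronously
 * might look like:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = (*vp->v_uobj.pgops->pgo_put)(&vp->v_uobj, lo, hi,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *	// the object lock has already been released here
 */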


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
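
/*
 * Illustrative caller sketch (an assumption): a synchronous,
 * non-PGO_LOCKED read of a single page at "off" through pgo_get.
 * On success the page comes back BUSY and the object lock has been
 * dropped, per the rules above:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(off), &pg,
 *	    &npages, 0, VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 */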


/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
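
/*
 * Illustrative sketch (not from this file): this is how a getpages
 * implementation might gather a run of BUSY pages.  Slots in pgs[]
 * must be NULL on entry; non-NULL slots are skipped as "dontcare":
 *
 *	struct vm_page *pgs[MAXPGS];	// MAXPGS: hypothetical bound
 *	int npages = nwanted;
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	mutex_enter(uobj->vmobjlock);
 *	nfound = uvn_findpages(uobj, startoff, &npages, pgs, UFP_ALL);
 *	// npages now holds the number of slots actually examined
 */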

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY and PG_HOLE pages if requested */
		if ((flags & UFP_NORDONLY) &&
		    (pg->flags & (PG_RDONLY|PG_HOLE))) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p (color %u)",
		    pg, VM_PGCOLOR_BUCKET(pg), 0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *    vnode in question so that it will not be yanked out from under
 *    us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}
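
/*
 * Illustrative sketch (an assumption, not from this file): a
 * filesystem's truncate path keeps the page cache in sync by calling
 * this after it has updated the on-disk inode size, e.g.:
 *
 *	error = somefs_update_size(vp, newlen);	// hypothetical helper
 *	if (error == 0)
 *		uvm_vnp_setsize(vp, newlen);
 */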

/*
 * uvm_vnp_setwritesize: grow (but not shrink) v_writesize
 */

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}
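
/*
 * Illustrative sketch (an assumption): a write that extends the file
 * typically announces the eventual size first, so that pages beyond
 * the old EOF are acceptable while the write is in progress, and then
 * commits the real size once the data has been copied in:
 *
 *	uvm_vnp_setwritesize(vp, newsize);
 *	error = copy_in_the_data(vp, uio);	// hypothetical step
 *	uvm_vnp_setsize(vp, error ? oldsize : newsize);
 */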

/*
 * uvn_text_p: is the vnode being used as executable text?
 */

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

/*
 * uvn_clean_p: is the vnode free of dirty pages (i.e. not on the
 * syncer worklist)?
 */

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

/*
 * uvn_needs_writefault_p: must writes to this vnode's pages be caught
 * with a write fault, so that dirtying can be noted?
 */

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
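
/*
 * Illustrative sketch (an assumption): the fault path can consult
 * uvn_needs_writefault_p() to decide whether a page should be entered
 * read-only, so that the first write access faults and the vnode can
 * be marked dirty and queued for the syncer:
 *
 *	if (uvn_needs_writefault_p(uobj))
 *		prot &= ~VM_PROT_WRITE;	// force a later write fault
 */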