/*	$NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void     uvn_detach(struct uvm_object *);
static int      uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
                        int, vm_prot_t, int, int);
static int      uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void     uvn_reference(struct uvm_object *);

static int      uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
                        int);

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
        NULL,                   /* pgo_init: nothing to initialize */
        uvn_reference,          /* pgo_reference */
        uvn_detach,             /* pgo_detach */
        NULL,                   /* pgo_fault: use the default fault path */
        uvn_get,                /* pgo_get */
        uvn_put,                /* pgo_put */
};
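
/*
 * Note: these ops are reached through uobj->pgops.  For a vnode the
 * uvm_object is the v_uobj member embedded in the vnode, and its pgops
 * pointer is set to &uvm_vnodeops when the vnode is initialized elsewhere
 * in the kernel; this file only supplies the methods themselves.
 */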

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
        VREF((struct vnode *)uobj);
}
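
/*
 * Note: the vnode pager keeps no reference count of its own; uvn_reference()
 * and uvn_detach() simply map object references onto the vnode's v_usecount
 * via VREF() and vrele().
 */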

/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
        vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
        error = VOP_PUTPAGES(vp, offlo, offhi, flags);
        LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
        return error;
}
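
/*
 * Example (illustrative only, not part of this file's interface): a typical
 * caller holds the object lock and flushes a range of pages, e.g. to clean
 * and free every page of a vnode synchronously:
 *
 *      simple_lock(&vp->v_interlock);
 *      error = VOP_PUTPAGES(vp, 0, 0,
 *          PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 *
 * the interlock is released by VOP_PUTPAGES before it returns; uvn_put()
 * above is just the pgo_put hook that forwards to VOP_PUTPAGES.
 */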


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

        UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

        /*
         * for plain reads (no write access, not a locked fault), drop the
         * object lock long enough to hand the range to the read-ahead code
         * before doing the synchronous VOP_GETPAGES below.
         */
        if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
                simple_unlock(&vp->v_interlock);
                vn_ra_allocctx(vp);
                uvm_ra_request(vp->v_ractx, advice, uobj, offset,
                    *npagesp << PAGE_SHIFT);
                simple_lock(&vp->v_interlock);
        }

        error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
            access_type, advice, flags);

        LOCK_ASSERT(((flags & PGO_LOCKED) != 0 &&
            simple_lock_held(&vp->v_interlock)) ||
            ((flags & PGO_LOCKED) == 0 &&
            !simple_lock_held(&vp->v_interlock)));
        return error;
}
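
/*
 * Example (illustrative only): this routine is normally reached via the
 * pgo_get hook from the fault code.  A caller that wants a single page
 * read in synchronously would do roughly the following ("offset" is a
 * placeholder for the page-aligned file offset):
 *
 *      struct vm_page *pg = NULL;
 *      int npages = 1;
 *
 *      simple_lock(&uobj->vmobjlock);
 *      error = (*uobj->pgops->pgo_get)(uobj, offset, &pg, &npages, 0,
 *          VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *
 * the object lock is dropped before any I/O is started, as noted above.
 */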


/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.  on return *npagesp holds the number of slots that
 * were processed and the number of pages actually found is returned.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
        int i, count, found, npages, rv;

        count = found = 0;
        npages = *npagesp;
        if (flags & UFP_BACKWARD) {
                for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        } else {
                for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        }
        *npagesp = count;
        return (found);
}
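
/*
 * Example (illustrative sketch of a typical caller, in the style of the
 * genfs I/O code; "pgs", "npages", "bytes" and "startoffset" are
 * placeholders local to the caller):
 *
 *      memset(pgs, 0, sizeof(pgs));
 *      npages = howmany(bytes, PAGE_SIZE);
 *      simple_lock(&uobj->vmobjlock);
 *      uvn_findpages(uobj, startoffset, &npages, pgs, UFP_ALL);
 *
 * every page returned in pgs[] is PG_BUSY and must eventually be unbusied
 * by the caller.
 */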

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
        struct vm_page *pg;
        bool dirty;
        UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

        if (*pgp != NULL) {
                UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
                return 0;
        }
        for (;;) {
                /* look for an existing page */
                pg = uvm_pagelookup(uobj, offset);

                /* nope?  allocate one now */
                if (pg == NULL) {
                        if (flags & UFP_NOALLOC) {
                                UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
                                return 0;
                        }
                        pg = uvm_pagealloc(uobj, offset, NULL, 0);
                        if (pg == NULL) {
                                if (flags & UFP_NOWAIT) {
                                        UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                        return 0;
                                }
                                simple_unlock(&uobj->vmobjlock);
                                uvm_wait("uvn_fp1");
                                simple_lock(&uobj->vmobjlock);
                                continue;
                        }
                        UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
                        break;
                } else if (flags & UFP_NOCACHE) {
                        UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
                        return 0;
                }

                /* page is there, see if we need to wait on it */
                if ((pg->flags & PG_BUSY) != 0) {
                        if (flags & UFP_NOWAIT) {
                                UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                return 0;
                        }
                        pg->flags |= PG_WANTED;
                        UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                            "uvn_fp2", 0);
                        simple_lock(&uobj->vmobjlock);
                        continue;
                }

                /* skip PG_RDONLY pages if requested */
                if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                        return 0;
                }

                /* stop on clean pages if requested */
                if (flags & UFP_DIRTYONLY) {
                        dirty = pmap_clear_modify(pg) ||
                            (pg->flags & PG_CLEAN) == 0;
                        pg->flags |= PG_CLEAN;
                        if (!dirty) {
                                UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
                                return 0;
                        }
                }

                /* mark the page BUSY and we're done. */
                pg->flags |= PG_BUSY;
                UVM_PAGE_OWN(pg, "uvn_findpage");
                UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
                break;
        }
        *pgp = pg;
        return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *      vnode in question so that it will not be yanked out from under
 *      us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
        struct uvm_object *uobj = &vp->v_uobj;
        voff_t pgend = round_page(newsize);
        voff_t oldsize;
        UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

        simple_lock(&uobj->vmobjlock);
        UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
            vp, vp->v_size, newsize, 0);

        /*
         * now check if the size has changed: if we shrink we had better
         * toss some pages...
         */

        KASSERT(newsize != VSIZENOTSET);
        KASSERT(vp->v_size <= vp->v_writesize);
        KASSERT(vp->v_size == vp->v_writesize ||
            newsize == vp->v_writesize || newsize <= vp->v_size);

        oldsize = vp->v_writesize;
        KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

        if (oldsize > pgend) {
                (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
                simple_lock(&uobj->vmobjlock);
        }
        vp->v_size = vp->v_writesize = newsize;
        simple_unlock(&uobj->vmobjlock);
}
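
/*
 * Example (illustrative only): a file system calls this once a size change
 * has been committed, e.g. at the end of its truncate path ("newlen" is a
 * placeholder for the new length in bytes):
 *
 *      uvm_vnp_setsize(vp, newlen);
 *
 * so that pages wholly past the new end of file are freed and v_size stays
 * in sync with the file's real length.
 */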

/*
 * uvm_vnp_setwritesize: grow the vnode's write size.
 *
 * this lets a file system note an extending write that is in progress:
 * v_writesize is raised to the eventual file size before the data is
 * written, and uvm_vnp_setsize() later commits the matching v_size.
 *
 * => the vnode's sizes must already be valid and newsize must not be
 *      smaller than the current v_size.
 */
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

        simple_lock(&vp->v_interlock);
        KASSERT(newsize != VSIZENOTSET);
        KASSERT(vp->v_size != VSIZENOTSET);
        KASSERT(vp->v_writesize != VSIZENOTSET);
        KASSERT(vp->v_size <= vp->v_writesize);
        KASSERT(vp->v_size <= newsize);
        vp->v_writesize = newsize;
        simple_unlock(&vp->v_interlock);
}
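
/*
 * Example (illustrative only): an extending write typically brackets the
 * data copy with the two size calls ("newsize" is a placeholder):
 *
 *      uvm_vnp_setwritesize(vp, newsize);
 *      ... copy the new data in through a UBC mapping ...
 *      uvm_vnp_setsize(vp, newsize);
 */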

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
        void *win;
        int flags;

        /*
         * XXXUBC invent kzero() and use it
         */

        while (len) {
                vsize_t bytelen = len;

                win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
                    UBC_WRITE);
                memset(win, 0, bytelen);
                flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
                ubc_release(win, flags);

                off += bytelen;
                len -= bytelen;
        }
}
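
/*
 * Example (illustrative only): a file system zeroes the tail of the last
 * block when shrinking a file to a length that is not block aligned,
 * roughly ("newlen" and "bsize" are placeholders for the new length and
 * the block size):
 *
 *      uvm_vnp_zerorange(vp, newlen, bsize - (newlen % bsize));
 *
 * ubc_alloc() may map less than the requested length at a time, which is
 * why the loop above repeats until the whole range has been zeroed.
 */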

/*
 * uvn_text_p: return true if the vnode is or has been mapped executable,
 * i.e. it may contain program text.
 */
bool
uvn_text_p(struct uvm_object *uobj)
{
        struct vnode *vp = (struct vnode *)uobj;

        return (vp->v_flag & VEXECMAP) != 0;
}

/*
 * uvn_clean_p: return true if the vnode has no dirty pages, i.e. it is
 * not on the syncer's work list.
 */
bool
uvn_clean_p(struct uvm_object *uobj)
{
        struct vnode *vp = (struct vnode *)uobj;

        return (vp->v_flag & VONWORKLST) == 0;
}

/*
 * uvn_needs_writefault_p: return true if the first write access to one of
 * this vnode's pages should be taken as a write fault so that the vnode
 * can be marked dirty at that point.  this is the case when the vnode is
 * currently clean, or when it is mapped for writing but has not yet been
 * dirtied through such a mapping.
 */
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
        struct vnode *vp = (struct vnode *)uobj;

        return uvn_clean_p(uobj) ||
            (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP;
}