/*	$NetBSD: uvm_vnode.c,v 1.83 2007/07/09 21:11:37 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.83 2007/07/09 21:11:37 ad Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */
struct uvm_pagerops uvm_vnodeops = {
	NULL,			/* pgo_init: no pager-wide setup needed */
	uvn_reference,		/* pgo_reference */
	uvn_detach,		/* pgo_detach */
	NULL,			/* pgo_fault: fall back to pgo_get via uvm_fault */
	uvn_get,		/* pgo_get */
	uvn_put,		/* pgo_put */
};

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object. if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object. if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants to
 * have to our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is first thing in vnode structure, so their
 *    pointers are equiv.
 */

struct uvm_object *
uvn_attach(void *arg, vm_prot_t accessprot)
{
	struct vnode *vp = arg;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vattr vattr;
	int result;
	struct partinfo pi;
	voff_t used_vnode_size;
	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
	used_vnode_size = (voff_t)0;

	/*
	 * first get a lock on the uobj.
	 */

	simple_lock(&uobj->vmobjlock);
	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, false,
		    "uvn_attach", 0);
		simple_lock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
	}

	/*
	 * if we're mapping a BLK device, make sure it is a disk.
	 */
	if (vp->v_type == VBLK) {
		if (bdev_type(vp->v_rdev) != D_DISK) {
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)",
			    0,0,0,0);
			return(NULL);
		}
	}
	KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

	/*
	 * set up our idea of the size
	 * if this hasn't been done already.
	 */
	if (vp->v_size == VSIZENOTSET) {

		vp->v_flag |= VXLOCK;
		simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */
		/* XXX: curproc? */
		if (vp->v_type == VBLK) {
			/*
			 * We could implement this as a specfs getattr call, but:
			 *
			 *	(1) VOP_GETATTR() would get the file system
			 *	    vnode operation, not the specfs operation.
			 *
			 *	(2) All we want is the size, anyhow.
			 */
			result = bdev_ioctl(vp->v_rdev, DIOCGPART, (void *)&pi,
			    FREAD, curlwp);
			if (result == 0) {
				/* XXX should remember blocksize */
				used_vnode_size = (voff_t)pi.disklab->d_secsize *
				    (voff_t)pi.part->p_size;
			}
		} else {
			result = VOP_GETATTR(vp, &vattr, curlwp->l_cred, curlwp);
			if (result == 0)
				used_vnode_size = vattr.va_size;
		}

		/* relock object */
		simple_lock(&uobj->vmobjlock);

		if (vp->v_flag & VXWANT) {
			wakeup(vp);
		}
		vp->v_flag &= ~(VXLOCK|VXWANT);

		if (result != 0) {
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
			return(NULL);
		}
		vp->v_size = vp->v_writesize = used_vnode_size;

	}

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
	    0, 0, 0);
	return uobj;
}
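
/*
 * Illustrative sketch only, not part of the pager: a mapping path (such
 * as uvm_mmap()) typically takes its own vnode reference before attaching
 * and later drops it through pgo_detach (i.e. vrele).  The wrapper
 * function and its error handling below are hypothetical; only VREF(),
 * uvn_attach() and the pager ops are real.
 */
#if 0
static int
example_map_vnode(struct vnode *vp, vm_prot_t prot)
{
	struct uvm_object *uobj;

	VREF(vp);			/* hold the vnode across the mapping */
	/* nothing may be locked here; uvn_attach() can sleep */
	uobj = uvn_attach(vp, prot);
	if (uobj == NULL) {
		vrele(vp);
		return EINVAL;		/* e.g. a VBLK that is not a disk */
	}

	/* ... enter uobj into a map with uvm_map() ... */

	/* when the mapping goes away, drop the reference again */
	(*uobj->pgops->pgo_detach)(uobj);
	return 0;
}
#endif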


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);
	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
	return error;
}
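
/*
 * Illustrative sketch only, not part of the pager: outside callers reach
 * uvn_put() through VOP_PUTPAGES().  A synchronous "flush and free
 * everything" call, as done when cleaning a vnode, looks roughly like
 * this (the wrapper function itself is hypothetical):
 */
#if 0
static int
example_flush_all(struct vnode *vp)
{
	/* v_interlock must be held on entry; VOP_PUTPAGES() drops it */
	simple_lock(&vp->v_interlock);
	return VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO);
}
#endif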


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		simple_unlock(&vp->v_interlock);
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
		simple_lock(&vp->v_interlock);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	LOCK_ASSERT(((flags & PGO_LOCKED) != 0 &&
	    simple_lock_held(&vp->v_interlock)) ||
	    ((flags & PGO_LOCKED) == 0 &&
	    !simple_lock_held(&vp->v_interlock)));
	return error;
}
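
/*
 * Illustrative sketch only, not part of the pager: a synchronous
 * single-page read through pgo_get.  The wrapper is hypothetical; note
 * the locking contract above -- without PGO_LOCKED, the object comes
 * back unlocked.
 */
#if 0
static int
example_get_one_page(struct uvm_object *uobj, voff_t off,
    struct vm_page **pgp)
{
	struct vm_page *pg = NULL;	/* slots must be NULL on entry */
	int npages = 1;
	int error;

	simple_lock(&uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
	/* object is now unlocked; pg is returned PG_BUSY on success */
	if (error == 0)
		*pgp = pg;
	return error;
}
#endif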


/*
 * uvn_findpages:
 *  return the pages for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 * => on return, *npagesp holds the number of slots examined.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
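
/*
 * Illustrative sketch only, not part of the pager: a caller (the real
 * consumer is genfs_getpages()) grabs a run of BUSY pages, works on
 * them, and unbusies them again.  The wrapper and the fixed-size array
 * here are hypothetical.
 */
#if 0
static void
example_grab_pages(struct uvm_object *uobj, voff_t off, int npages)
{
	struct vm_page *pgs[16];
	int n = npages;

	KASSERT(npages <= 16);
	memset(pgs, 0, sizeof(pgs));	/* non-NULL slots would be skipped */
	simple_lock(&uobj->vmobjlock);
	uvn_findpages(uobj, off, &n, pgs, UFP_ALL);

	/* ... operate on the PG_BUSY pages pgs[0 .. n-1] ... */

	uvm_page_unbusy(pgs, n);	/* wakes any PG_WANTED sleepers */
	simple_unlock(&uobj->vmobjlock);
}
#endif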

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				simple_unlock(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				simple_lock(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			simple_lock(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	simple_lock(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	if (vp->v_writesize != VSIZENOTSET) {
		KASSERT(vp->v_size <= vp->v_writesize);
		KASSERT(vp->v_size == vp->v_writesize ||
		    newsize == vp->v_writesize || newsize <= vp->v_size);
		oldsize = vp->v_writesize;
	} else {
		oldsize = vp->v_size;
	}
	if (oldsize > pgend && oldsize != VSIZENOTSET) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		simple_lock(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	simple_unlock(&uobj->vmobjlock);
}
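
/*
 * Illustrative sketch only, not part of the pager: file systems call
 * uvm_vnp_setsize() whenever the file size changes, e.g. from a truncate
 * path.  The wrapper is hypothetical; a shrink frees the pages past the
 * new end of file via uvn_put(PGO_FREE) above.
 */
#if 0
static void
example_truncate(struct vnode *vp, voff_t newlen)
{
	/* ... file system updates its own size bookkeeping first ... */
	uvm_vnp_setsize(vp, newlen);	/* nothing locked; takes vmobjlock */
}
#endif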

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	simple_lock(&vp->v_interlock);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	simple_unlock(&vp->v_interlock);
}

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}
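
/*
 * Illustrative sketch only, not part of the pager: a typical user of
 * uvm_vnp_zerorange() is a write path that extends a file and must make
 * the gap between the old and new end of file read back as zeroes.  The
 * wrapper is hypothetical.
 */
#if 0
static void
example_zero_gap(struct vnode *vp, off_t oldeof, off_t neweof)
{
	if (neweof > oldeof)
		uvm_vnp_zerorange(vp, oldeof, (size_t)(neweof - oldeof));
}
#endif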

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_flag & VEXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_flag & VONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP;
}