/*	$NetBSD: uvm_vnode.c,v 1.65 2005/06/27 02:19:48 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.65 2005/06/27 02:19:48 thorpej Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>

/*
 * functions
 */

void	uvn_detach(struct uvm_object *);
int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *, int,
	    vm_prot_t, int, int);
int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
void	uvn_reference(struct uvm_object *);

int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **, int);

/*
 * master pager structure
 */
struct uvm_pagerops uvm_vnodeops = {
	NULL,			/* pgo_init */
	uvn_reference,		/* pgo_reference */
	uvn_detach,		/* pgo_detach */
	NULL,			/* pgo_fault */
	uvn_get,		/* pgo_get */
	uvn_put,		/* pgo_put */
};
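
/*
 * The UVM core never calls the uvn_* functions by name; it dispatches
 * through the pgops table hung off each uvm_object.  A minimal sketch
 * of such a dispatch (not part of the original file):
 */
#if 0
	/* take, then drop, a pager reference on any flavor of object */
	(*uobj->pgops->pgo_reference)(uobj);
	(*uobj->pgops->pgo_detach)(uobj);
#endif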

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object. if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object. if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants to
 * our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is first thing in vnode structure, so their
 *    pointers are equiv.
 */

struct uvm_object *
uvn_attach(void *arg, vm_prot_t accessprot)
{
	struct vnode *vp = arg;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vattr vattr;
	const struct bdevsw *bdev;
	int result;
	struct partinfo pi;
	voff_t used_vnode_size;
	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
	used_vnode_size = (voff_t)0;

	/*
	 * first get a lock on the uobj.
	 */

	simple_lock(&uobj->vmobjlock);
	while (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
		    "uvn_attach", 0);
		simple_lock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
	}

	/*
	 * if we're mapping a BLK device, make sure it is a disk.
	 */

	if (vp->v_type == VBLK) {
		bdev = bdevsw_lookup(vp->v_rdev);
		if (bdev == NULL || bdev->d_type != D_DISK) {
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)",
			    0,0,0,0);
			return(NULL);
		}
	}
	KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

	/*
	 * set up our idea of the size
	 * if this hasn't been done already.
	 */

	if (vp->v_size == VSIZENOTSET) {

		vp->v_flag |= VXLOCK;
		simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */
		/* XXX: curproc? */
		if (vp->v_type == VBLK) {

			/*
			 * We could implement this as a specfs getattr call, but:
			 *
			 *	(1) VOP_GETATTR() would get the file system
			 *	    vnode operation, not the specfs operation.
			 *
			 *	(2) All we want is the size, anyhow.
			 */

			bdev = bdevsw_lookup(vp->v_rdev);
			if (bdev != NULL) {
				result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART,
				    (caddr_t)&pi, FREAD, curproc);
			} else {
				result = ENXIO;
			}
			if (result == 0) {
				/* XXX should remember blocksize */
				used_vnode_size = (voff_t)pi.disklab->d_secsize *
				    (voff_t)pi.part->p_size;
			}
		} else {
			result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
			if (result == 0)
				used_vnode_size = vattr.va_size;
		}

		/* relock object */
		simple_lock(&uobj->vmobjlock);

		if (vp->v_flag & VXWANT) {
			wakeup(vp);
		}
		vp->v_flag &= ~(VXLOCK|VXWANT);

		if (result != 0) {
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
			return(NULL);
		}
		vp->v_size = used_vnode_size;
	}

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
	    0, 0, 0);
	return uobj;
}
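
/*
 * A minimal sketch of an attach (hypothetical caller; the real one is
 * the mmap path).  Nothing may be locked, since uvn_attach() can sleep
 * on VXLOCK or in VOP_GETATTR(), and the caller is assumed to already
 * hold a vnode reference:
 */
#if 0
	struct uvm_object *uobj;

	uobj = uvn_attach(vp, VM_PROT_READ | VM_PROT_WRITE);
	if (uobj == NULL)
		return EINVAL;	/* e.g. a VBLK device that is not a disk */
#endif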


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

void
uvn_reference(struct uvm_object *uobj)
{
	VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);
	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
	return error;
}
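
/*
 * A minimal sketch of the uvn_put() locking protocol (hypothetical
 * caller).  The object lock is the vnode's v_interlock; it is held on
 * entry and VOP_PUTPAGES() releases it, so nothing about the object
 * may be touched afterwards without relocking:
 */
#if 0
	simple_lock(&vp->v_interlock);
	/* offlo == offhi == 0 means "the whole object" to VOP_PUTPAGES() */
	error = uvn_put(&vp->v_uobj, 0, 0, PGO_CLEANIT | PGO_SYNCIO);
	/* v_interlock is no longer held here */
#endif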


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;
	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);
	return error;
}
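
/*
 * A minimal sketch of a synchronous single-page read through uvn_get()
 * (hypothetical caller; the fault handler and UBC are the real ones).
 * Note the object lock is dropped inside before the I/O starts:
 */
#if 0
	struct vm_page *pg = NULL;
	int npages = 1;

	simple_lock(&uobj->vmobjlock);
	error = uvn_get(uobj, offset, &pg, &npages, 0 /* centeridx */,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
#endif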


/*
 * uvn_findpages:
 * return the pages for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
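
/*
 * A minimal sketch of a uvn_findpages() caller (hypothetical; the real
 * callers live in the genfs and UBC code).  "uobj" is assumed to be a
 * locked vnode object:
 */
#if 0
	struct vm_page *pgs[4];
	int npages = 4;

	memset(pgs, 0, sizeof(pgs));	/* non-NULL slots are skipped */
	(void) uvn_findpages(uobj, offset, &npages, pgs, UFP_ALL);
	/* npages is now the count examined; found pages are PG_BUSY */
#endif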

int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	boolean_t dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				simple_unlock(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				simple_lock(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			simple_lock(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	simple_lock(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
	} else {
		simple_unlock(&uobj->vmobjlock);
	}
	vp->v_size = newsize;
}
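
/*
 * A minimal sketch of the usual caller pattern (hypothetical; each
 * file system calls this from its size-changing paths, for example
 * when truncating; "new_length" is assumed).  On shrink, pages wholly
 * past the new page-rounded EOF are flushed and freed by the uvn_put()
 * above:
 */
#if 0
	uvm_vnp_setsize(vp, new_length);
#endif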

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}
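
/*
 * A minimal sketch of a typical use (hypothetical, FFS-flavored: "fs"
 * and "new_length" are assumed).  On a truncate to a length that is
 * not block aligned, the tail of the last block is zeroed so stale
 * data cannot leak back in if the file is later extended:
 */
#if 0
	off_t boff = blkoff(fs, new_length);

	if (boff != 0)
		uvm_vnp_zerorange(vp, new_length, fs->fs_bsize - boff);
#endif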