/*	$NetBSD: uvm_vnode.c,v 1.46.2.11 2002/07/12 01:40:46 nathanw Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.46.2.11 2002/07/12 01:40:46 nathanw Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>

/*
 * functions
 */

void    uvn_detach __P((struct uvm_object *));
int     uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *,
            int, vm_prot_t, int, int));
int     uvn_put __P((struct uvm_object *, voff_t, voff_t, int));
void    uvn_reference __P((struct uvm_object *));

int     uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **,
            int));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
        NULL,                   /* pgo_init: no pager-wide setup needed */
        uvn_reference,          /* pgo_reference */
        uvn_detach,             /* pgo_detach */
        NULL,                   /* pgo_fault: faults go through pgo_get */
        uvn_get,                /* pgo_get */
        uvn_put,                /* pgo_put */
};

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants to
 * have to our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is the first thing in the vnode structure, so
 *    their pointers are equivalent.
 */

struct uvm_object *
uvn_attach(arg, accessprot)
        void *arg;
        vm_prot_t accessprot;
{
        struct vnode *vp = arg;
        struct uvm_object *uobj = &vp->v_uobj;
        struct vattr vattr;
        int result;
        struct partinfo pi;
        voff_t used_vnode_size;
        UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
        used_vnode_size = (voff_t)0;

        /*
         * first get a lock on the uobj.
         */

        simple_lock(&uobj->vmobjlock);
        while (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
                    "uvn_attach", 0);
                simple_lock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
        }

        /*
         * if we're mapping a BLK device, make sure it is a disk.
         */

        if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
                simple_unlock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
                return(NULL);
        }
        KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

        /*
         * set up our idea of the size
         * if this hasn't been done already.
         */

        if (vp->v_size == VSIZENOTSET) {
                vp->v_flag |= VXLOCK;
                simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */
                /* XXX: curlwp? */
                if (vp->v_type == VBLK) {

                        /*
                         * We could implement this as a specfs getattr call,
                         * but:
                         *
                         *      (1) VOP_GETATTR() would get the file system
                         *          vnode operation, not the specfs operation.
                         *
                         *      (2) All we want is the size, anyhow.
                         */

                        result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
                            DIOCGPART, (caddr_t)&pi, FREAD, curproc);
                        if (result == 0) {
                                /* XXX should remember blocksize */
                                used_vnode_size = (voff_t)pi.disklab->d_secsize *
                                    (voff_t)pi.part->p_size;
                        }
                } else {
                        result = VOP_GETATTR(vp, &vattr, curproc->p_ucred,
                            curproc);
                        if (result == 0)
                                used_vnode_size = vattr.va_size;
                }

                /* relock object */
                simple_lock(&uobj->vmobjlock);

                if (vp->v_flag & VXWANT) {
                        wakeup(vp);
                }
                vp->v_flag &= ~(VXLOCK|VXWANT);

                if (result != 0) {
                        simple_unlock(&uobj->vmobjlock);
                        UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)",
                            0,0,0,0);
                        return(NULL);
                }
                vp->v_size = used_vnode_size;
        }

        simple_unlock(&uobj->vmobjlock);
        UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
            0, 0, 0);
        return uobj;
}
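
/*
 * usage sketch (illustrative only, not part of the original source):
 * a caller such as a device mmap routine would attach with no locks
 * held and drop the reference later with uvn_detach():
 *
 *      uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_WRITE);
 *      if (uobj == NULL)
 *              return (EINVAL);        (not VREG/VBLK, or size unknown)
 */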

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

void
uvn_reference(uobj)
        struct uvm_object *uobj;
{
        VREF((struct vnode *)uobj);
}

/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

void
uvn_detach(uobj)
        struct uvm_object *uobj;
{
        vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

int
uvn_put(uobj, offlo, offhi, flags)
        struct uvm_object *uobj;
        voff_t offlo;
        voff_t offhi;
        int flags;
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
        error = VOP_PUTPAGES(vp, offlo, offhi, flags);
        LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
        return error;
}
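
/*
 * usage sketch (illustrative only): synchronously flushing and freeing
 * all pages of a vnode would look roughly like
 *
 *      simple_lock(&vp->v_interlock);
 *      error = uvn_put(&vp->v_uobj, 0, 0, PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO);
 *
 * with no unlock afterwards, since VOP_PUTPAGES releases the interlock.
 */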

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
        struct uvm_object *uobj;
        voff_t offset;
        struct vm_page **pps;           /* IN/OUT */
        int *npagesp;                   /* IN (OUT if PGO_LOCKED) */
        int centeridx;
        vm_prot_t access_type;
        int advice, flags;
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;
        UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
        error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
            access_type, advice, flags);
        return error;
}
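
/*
 * usage sketch (illustrative only): fetching a single page synchronously,
 * with the object locked on entry as required:
 *
 *      npages = 1;
 *      pg = NULL;
 *      simple_lock(&uobj->vmobjlock);
 *      error = uvn_get(uobj, offset, &pg, &npages, 0,
 *          VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *
 * on success the object has been unlocked and the page is returned busy.
 */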

/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.  returns the number of pages actually found and, via
 * *npagesp, the number of slots examined.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(uobj, offset, npagesp, pgs, flags)
        struct uvm_object *uobj;
        voff_t offset;
        int *npagesp;
        struct vm_page **pgs;
        int flags;
{
        int i, count, found, npages, rv;

        count = found = 0;
        npages = *npagesp;
        if (flags & UFP_BACKWARD) {
                for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        } else {
                for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        }
        *npagesp = count;
        return (found);
}
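
/*
 * usage sketch (illustrative only; UFP_ALL is assumed to be the
 * "no restrictions" flags value): allocating a run of busy pages
 * for a write:
 *
 *      npages = howmany(bytes, PAGE_SIZE);
 *      memset(pgs, 0, sizeof(pgs));
 *      simple_lock(&uobj->vmobjlock);
 *      uvn_findpages(uobj, offset, &npages, pgs, UFP_ALL);
 *      simple_unlock(&uobj->vmobjlock);
 */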

int
uvn_findpage(uobj, offset, pgp, flags)
        struct uvm_object *uobj;
        voff_t offset;
        struct vm_page **pgp;
        int flags;
{
        struct vm_page *pg;
        boolean_t dirty;
        UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

        if (*pgp != NULL) {
                UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
                return 0;
        }
        for (;;) {
                /* look for an existing page */
                pg = uvm_pagelookup(uobj, offset);

                /* nope?  allocate one now */
                if (pg == NULL) {
                        if (flags & UFP_NOALLOC) {
                                UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
                                return 0;
                        }
                        pg = uvm_pagealloc(uobj, offset, NULL, 0);
                        if (pg == NULL) {
                                if (flags & UFP_NOWAIT) {
                                        UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                        return 0;
                                }
                                simple_unlock(&uobj->vmobjlock);
                                uvm_wait("uvn_fp1");
                                simple_lock(&uobj->vmobjlock);
                                continue;
                        }
                        if (UVM_OBJ_IS_VTEXT(uobj)) {
                                uvmexp.execpages++;
                        } else {
                                uvmexp.filepages++;
                        }
                        UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
                        break;
                } else if (flags & UFP_NOCACHE) {
                        UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
                        return 0;
                }

                /* page is there, see if we need to wait on it */
                if ((pg->flags & PG_BUSY) != 0) {
                        if (flags & UFP_NOWAIT) {
                                UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                return 0;
                        }
                        pg->flags |= PG_WANTED;
                        UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                            "uvn_fp2", 0);
                        simple_lock(&uobj->vmobjlock);
                        continue;
                }

                /* skip PG_RDONLY pages if requested */
                if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                        return 0;
                }

                /* stop on clean pages if requested */
                if (flags & UFP_DIRTYONLY) {
                        dirty = pmap_clear_modify(pg) ||
                            (pg->flags & PG_CLEAN) == 0;
                        pg->flags |= PG_CLEAN;
                        if (!dirty) {
                                UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
                                return 0;
                        }
                }

                /* mark the page BUSY and we're done. */
                pg->flags |= PG_BUSY;
                UVM_PAGE_OWN(pg, "uvn_findpage");
                UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
                break;
        }
        *pgp = pg;
        return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *    vnode in question so that it will not be yanked out from under
 *    us.
 */

void
uvm_vnp_setsize(vp, newsize)
        struct vnode *vp;
        voff_t newsize;
{
        struct uvm_object *uobj = &vp->v_uobj;
        voff_t pgend = round_page(newsize);
        UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

        simple_lock(&uobj->vmobjlock);
        UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
            vp, vp->v_size, newsize, 0);

        /*
         * now check if the size has changed: if we shrink we had better
         * toss some pages...
         */

        if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {

                /*
                 * an "offhi" of 0 means "flush to the end of the object",
                 * so this frees every page past the new end-of-file.
                 * note that uvn_put() drops vmobjlock for us.
                 */

                (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
        } else {
                simple_unlock(&uobj->vmobjlock);
        }
        vp->v_size = newsize;
}
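
/*
 * usage sketch (illustrative only): a file system's truncate path is
 * expected to call this when the file size changes, e.g.
 *
 *      uvm_vnp_setsize(vp, (voff_t)newlength);
 *
 * so that pages beyond the new end-of-file are tossed before the
 * underlying blocks are released.
 */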

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(vp, off, len)
        struct vnode *vp;
        off_t off;
        size_t len;
{
        void *win;

        /*
         * XXXUBC invent kzero() and use it
         */

        while (len) {
                vsize_t bytelen = len;

                win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE);
                memset(win, 0, bytelen);
                ubc_release(win, 0);

                off += bytelen;
                len -= bytelen;
        }
}
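
/*
 * usage sketch (illustrative only; "newlen" and "blkroundup" are assumed
 * names): zeroing the tail of the block containing a new end-of-file:
 *
 *      uvm_vnp_zerorange(vp, newlen, blkroundup(newlen) - newlen);
 *
 * where blkroundup() stands in for the file system's block-rounding macro.
 */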