/*	$NetBSD: uvm_vnode.c,v 1.93.2.6 2010/11/21 12:02:06 uebayasi Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.93.2.6 2010/11/21 12:02:06 uebayasi Exp $");

#include "opt_uvmhist.h"
#include "opt_xip.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
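
/*
 * Note: callers do not invoke these functions directly; they reach them
 * through the object's pager-operation vector.  A minimal sketch of how
 * a caller would dispatch to uvn_get() (illustrative only, loosely
 * modelled on the fault path; "vp" and "offset" are assumptions and
 * "offset" is assumed page-aligned):
 */
#if 0 /* example sketch, not compiled */
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pgs[1] = { NULL };
	int npages = 1, error;

	mutex_enter(&uobj->vmobjlock);
	/* flags == 0: pgo_get unlocks the object and may sleep for I/O */
	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, 0);
#endif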

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
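
/*
 * A minimal usage sketch (illustrative only): flushing and freeing a
 * page range through the pager op.  The caller takes v_interlock;
 * VOP_PUTPAGES releases it before returning.  "start" and "end" are
 * assumptions.
 */
#if 0 /* example sketch, not compiled */
	mutex_enter(&vp->v_interlock);
	error = (*vp->v_uobj.pgops->pgo_put)(&vp->v_uobj,
	    trunc_page(start), round_page(end), PGO_CLEANIT | PGO_SYNCIO);
#endif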


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

#ifdef XIP
	if ((vp->v_vflag & VV_XIP) != 0)
		goto uvn_get_ra_done;
#endif
	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}
#ifdef XIP
uvn_get_ra_done:
#endif

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}


/*
 * uvn_findpages:
 *	look up the pages covering the requested range of the uobj,
 *	allocating any missing ones as needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
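
/*
 * A minimal usage sketch (illustrative only; loosely modelled on what
 * genfs_getpages() does): look up a run of busy pages, allocating any
 * that are missing.  "startoff" and "orignpages" are assumptions.
 */
#if 0 /* example sketch, not compiled */
	struct vm_page *pgs[16];
	int npages = orignpages;

	memset(pgs, 0, sizeof(pgs));	/* NULL slots => "find this page" */
	mutex_enter(&uobj->vmobjlock);
	uvn_findpages(uobj, startoff, &npages, pgs, UFP_ALL);
	/* on return, pgs[0 .. npages-1] hold PG_BUSY pages we now own */
	mutex_exit(&uobj->vmobjlock);
#endif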

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			mutex_enter(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(&uobj->vmobjlock);
}
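
/*
 * A minimal usage sketch (illustrative only): a file system calls
 * uvm_vnp_setsize() whenever it changes the file size, e.g. at the
 * end of a truncate, so the page cache stays consistent ("ip" is a
 * hypothetical inode):
 */
#if 0 /* example sketch, not compiled */
	ip->i_size = newsize;
	uvm_vnp_setsize(vp, newsize);	/* shrinking frees cached pages */
#endif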

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(&vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(&vp->v_interlock);
}
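
/*
 * A minimal sketch (illustrative only) of the intended ordering in a
 * file system write path: extend the writable size before copying in
 * data past EOF, then commit the real size once the copy succeeds
 * (loosely modelled on how ffs_write() uses these functions; "uio",
 * "bytes" and "ioflag" are assumptions):
 */
#if 0 /* example sketch, not compiled */
	uvm_vnp_setwritesize(vp, newsize);		/* before the copy */
	error = ubc_uiomove(&vp->v_uobj, uio, bytes,
	    IO_ADV_DECODE(ioflag), UBC_WRITE);
	if (error == 0)
		uvm_vnp_setsize(vp, newsize);		/* after the copy */
#endif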

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
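
/*
 * A minimal sketch (illustrative only) of how a fault handler might
 * use the predicate above: enter the mapping read-only when the vnode
 * wants to see the first write as a fault, so the page can be marked
 * dirty at that point.  "enter_prot" is an assumption.
 */
#if 0 /* example sketch, not compiled */
	if (uvn_needs_writefault_p(uobj))
		enter_prot &= ~VM_PROT_WRITE;	/* force a later write fault */
#endif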

/*
 * uvn_findpage_xip
 *	Lookup a physical page identity (== struct vm_page * in
 *	the current UVM design) within the given vnode, at the
 *	given offset.
 */
struct vm_page *
uvn_findpage_xip(struct vnode *devvp, struct uvm_object *uobj, off_t off)
{
#if defined(DIAGNOSTIC)
	struct vnode *vp = (struct vnode *)uobj;
#endif
	struct vm_physseg *seg;
	struct vm_page *pg;

	UVMHIST_FUNC("uvn_findpage_xip"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "called devvp=%p uobj=%p off=%lx",
	    devvp, uobj, (long)off, 0);

#if defined(XIP)
#if !defined(XIP_CDEV_MMAP)
	KASSERT((vp->v_vflag & VV_XIP) != 0);
	KASSERT((off & PAGE_MASK) == 0);

	/*
	 * Lookup a physical page identity from the underlying physical
	 * segment.
	 *
	 * Eventually, this will be replaced by a call of character
	 * device pager method, which is a generalized version of
	 * cdev_mmap().  Which means that v_physseg will become struct
	 * uvm_object *, and this will call cdev_page(uobj, off).
	 */

	seg = devvp->v_physseg;
	KASSERT(seg != NULL);

	pg = seg->pgs + (off >> PAGE_SHIFT);
#else
	dev_t dev;
	paddr_t mdpgno, pa, pfn;
	int segno, segidx;

	KASSERT(vp != NULL);
	KASSERT((vp->v_vflag & VV_XIP) != 0);
	KASSERT((off & PAGE_MASK) == 0);

	/*
	 * Get an "mmap cookie" from the device.
	 */
	dev = devsw_blk2chr(devvp->v_rdev);
	mdpgno = cdev_mmap(dev, off, 0);
	KASSERT(mdpgno != -1);

	/*
	 * Index the matching vm_page and return it to the vnode pager
	 * (genfs_getpages).
	 */
	pa = pmap_phys_address(mdpgno);
	pfn = atop(pa);
	segno = vm_physseg_find_device(pfn, &segidx);
	seg = VM_PHYSDEV_PTR(segno);
	KASSERT(seg != NULL);
	KASSERT(segidx == pfn - seg->start);
	KASSERT(seg->pgs != NULL);

	pg = seg->pgs + segidx;
#endif
#endif

	KASSERT(pg->phys_addr == (seg->start << PAGE_SHIFT) + off);

	pg->flags |= PG_BUSY;

	/* pa is only computed in the XIP_CDEV_MMAP case; log phys_addr. */
	UVMHIST_LOG(ubchist, "done pa=%lx seg=%p pg=%p off=%lx",
	    (long)pg->phys_addr, seg, pg, (long)off);

	return pg;
}
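
/*
 * A minimal usage sketch (illustrative only): an XIP-aware getpages
 * implementation hands the device page back directly instead of
 * allocating a cache page and reading into it.  "devvp", "offset" and
 * "centeridx" are assumptions.
 */
#if 0 /* example sketch, not compiled */
	pgs[centeridx] = uvn_findpage_xip(devvp, &vp->v_uobj, offset);
	KASSERT(pgs[centeridx] != NULL);	/* now PG_BUSY */
#endif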