/*	$NetBSD: uvm_vnode.c,v 1.17.2.1 1998/11/09 06:06:40 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	>>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

#include "fs_nfs.h"
#include "opt_uvmhist.h"

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_vnode.h>

/*
 * private global data structure
 *
 * we keep a list of writeable active vnode-backed VM objects for sync op.
 * we keep a simpleq of vnodes that are currently being sync'd.
 */

LIST_HEAD(uvn_list_struct, uvm_vnode);
static struct uvn_list_struct uvn_wlist;	/* writeable uvns */
static simple_lock_data_t uvn_wl_lock;		/* locks uvn_wlist */

SIMPLEQ_HEAD(uvn_sq_struct, uvm_vnode);
static struct uvn_sq_struct uvn_sync_q;		/* sync'ing uvns */
lock_data_t uvn_sync_lock;			/* locks sync operation */
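
#if 0
/*
 * a minimal sketch (not compiled) of how a whole-sync operation could
 * use the structures above: build the sync queue from the writeable
 * list under uvn_wl_lock, then flush each uvn.  this is an illustration
 * of the intended protocol only, not the real uvm_vnp_sync(); it
 * assumes a "u_syncq" SIMPLEQ entry in struct uvm_vnode, and it elides
 * both the reference counting a real sync must do and the taking of
 * uvn_sync_lock (which serializes concurrent sync operations).
 */
void
uvn_sync_sketch()
{
	struct uvm_vnode *uvn;

	/* build the sync queue from the writeable list */
	SIMPLEQ_INIT(&uvn_sync_q);
	simple_lock(&uvn_wl_lock);
	for (uvn = uvn_wlist.lh_first ; uvn != NULL ;
	    uvn = uvn->u_wlist.le_next) {
		SIMPLEQ_INSERT_TAIL(&uvn_sync_q, uvn, u_syncq);
	}
	simple_unlock(&uvn_wl_lock);

	/* now flush each writeable uvn to backing store */
	for (uvn = uvn_sync_q.sqh_first ; uvn != NULL ;
	    uvn = uvn->u_syncq.sqe_next) {
		simple_lock(&uvn->u_obj.vmobjlock);
		/* uvn_flush returns with the object still locked */
		(void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_ALLPAGES);
		simple_unlock(&uvn->u_obj.vmobjlock);
	}
}
#endif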

/*
 * functions
 */

static int		uvn_asyncget __P((struct uvm_object *, vaddr_t,
					  int));
struct uvm_object	*uvn_attach __P((void *, vm_prot_t));
static void		uvn_cluster __P((struct uvm_object *, vaddr_t,
					 vaddr_t *, vaddr_t *));
static void		uvn_detach __P((struct uvm_object *));
static boolean_t	uvn_flush __P((struct uvm_object *, vaddr_t,
				       vaddr_t, int));
static int		uvn_get __P((struct uvm_object *, vaddr_t,
				     vm_page_t *, int *, int,
				     vm_prot_t, int, int));
static void		uvn_init __P((void));
static int		uvn_put __P((struct uvm_object *, vm_page_t *,
				     int, boolean_t));
static void		uvn_reference __P((struct uvm_object *));
static boolean_t	uvn_releasepg __P((struct vm_page *,
					   struct vm_page **));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
	uvn_init,
	uvn_attach,
	uvn_reference,
	uvn_detach,
	NULL,			/* no specialized fault routine required */
	uvn_flush,
	uvn_get,
	uvn_asyncget,
	uvn_put,
	uvn_cluster,
	uvm_mk_pcluster,	/* use generic version of this: see uvm_pager.c */
	uvm_shareprot,		/* !NULL: allow us in share maps */
	NULL,			/* AIO-DONE function (not until we have asyncio) */
	uvn_releasepg,
};

/*
 * the ops!
 */

/*
 * uvn_init
 *
 * init pager private data structures.
 */

static void
uvn_init()
{

	LIST_INIT(&uvn_wlist);
	simple_lock_init(&uvn_wl_lock);
	/* note: uvn_sync_q init'd in uvm_vnp_sync() */
	lockinit(&uvn_sync_lock, PVM, "uvnsync", 0, 0);
}

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM obj.
 * the "accessprot" tells us the max access the attaching thread wants to
 * have on our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is first thing in vnode structure, so their
 *    pointers are equiv.
 */

struct uvm_object *
uvn_attach(arg, accessprot)
	void *arg;
	vm_prot_t accessprot;
{
	struct vnode *vp = arg;
	struct uvm_vnode *uvn = &vp->v_uvm;
	struct vattr vattr;
	int oldflags, result;
	struct partinfo pi;
	off_t used_vnode_size;
	UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);

	used_vnode_size = (u_quad_t)0;	/* XXX gcc -Wuninitialized */

	/*
	 * first get a lock on the uvn.
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	while (uvn->u_flags & UVM_VNODE_BLOCKED) {
		uvn->u_flags |= UVM_VNODE_WANTED;
		UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE,
		    "uvn_attach", 0);
		simple_lock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
	}

	/*
	 * if we're mapping a BLK device, make sure it is a disk.
	 */
	if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
		return(NULL);
	}

#ifdef UBC
	oldflags = 0;

#ifdef DIAGNOSTIC
	if (vp->v_type != VREG) {
		panic("uvn_attach: vp %p not VREG", vp);
	}
#endif

	/*
	 * set up our idea of the size
	 * if this hasn't been done already.
	 */
	if (uvn->u_size == VSIZENOTSET) {

		uvn->u_flags = UVM_VNODE_ALOCK;
		simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */
		/* XXX: curproc? */
		if (vp->v_type == VBLK) {
			/*
			 * We could implement this as a specfs getattr call, but:
			 *
			 *	(1) VOP_GETATTR() would get the file system
			 *	    vnode operation, not the specfs operation.
			 *
			 *	(2) All we want is the size, anyhow.
			 */
			result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
			    DIOCGPART, (caddr_t)&pi, FREAD, curproc);
			if (result == 0) {
				/* XXX should remember blocksize */
				used_vnode_size = (u_quad_t)pi.disklab->d_secsize *
				    (u_quad_t)pi.part->p_size;
			}
		} else {
			result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
			if (result == 0)
				used_vnode_size = vattr.va_size;
		}

		/*
		 * make sure that the newsize fits within a vaddr_t
		 * XXX: need to revise addressing data types
		 */
		if (used_vnode_size > (vaddr_t) -PAGE_SIZE) {
#ifdef DEBUG
			printf("uvn_attach: vn %p size truncated %qx->%x\n", vp,
			    used_vnode_size, -PAGE_SIZE);
#endif
			used_vnode_size = (vaddr_t) -PAGE_SIZE;
		}

		/* relock object */
		simple_lock(&uvn->u_obj.vmobjlock);

		if (uvn->u_flags & UVM_VNODE_WANTED)
			wakeup(uvn);
		uvn->u_flags = 0;

		if (result != 0) {
			simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */
			UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
			return(NULL);
		}
		uvn->u_size = used_vnode_size;

	}

	/* check for new writeable uvn */
	if ((accessprot & VM_PROT_WRITE) != 0 &&
	    (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) {
		simple_lock(&uvn_wl_lock);

		if (uvn->u_wlist.le_next != NULL) {
			printf("already on wlist vp %p\n", uvn);
			Debugger();
		}

		LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
		simple_unlock(&uvn_wl_lock);
		/* we are now on wlist! */
		uvn->u_flags |= UVM_VNODE_WRITEABLE;
	}

	/* unlock and return */
	simple_unlock(&uvn->u_obj.vmobjlock);
	UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs,
	    0, 0, 0);
	return (&uvn->u_obj);
#else
	/*
	 * now we have lock and uvn must not be in a blocked state.
	 * first check to see if it is already active, in which case
	 * we can bump the reference count, check to see if we need to
	 * add it to the writeable list, and then return.
	 */
	if (uvn->u_flags & UVM_VNODE_VALID) {	/* already active? */

		/* regain VREF if we were persisting */
		if (uvn->u_obj.uo_refs == 0) {
			VREF(vp);
			UVMHIST_LOG(maphist," VREF (reclaim persisting vnode)",
			    0,0,0,0);
		}
		uvn->u_obj.uo_refs++;		/* bump uvn ref! */

		/* check for new writeable uvn */
		if ((accessprot & VM_PROT_WRITE) != 0 &&
		    (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) {
			simple_lock(&uvn_wl_lock);
			LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
			simple_unlock(&uvn_wl_lock);
			/* we are now on wlist! */
			uvn->u_flags |= UVM_VNODE_WRITEABLE;
		}

		/* unlock and return */
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs,
		    0, 0, 0);
		return (&uvn->u_obj);
	}

	/*
	 * need to call VOP_GETATTR() to get the attributes, but that could
	 * block (due to I/O), so we want to unlock the object before calling.
	 * however, we want to keep anyone else from playing with the object
	 * while it is unlocked.  to do this we set UVM_VNODE_ALOCK which
	 * prevents anyone from attaching to the vnode until we are done with
	 * it.
	 */
	uvn->u_flags = UVM_VNODE_ALOCK;
	simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */
	/* XXX: curproc? */

	if (vp->v_type == VBLK) {
		/*
		 * We could implement this as a specfs getattr call, but:
		 *
		 *	(1) VOP_GETATTR() would get the file system
		 *	    vnode operation, not the specfs operation.
		 *
		 *	(2) All we want is the size, anyhow.
		 */
		result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
		    DIOCGPART, (caddr_t)&pi, FREAD, curproc);
		if (result == 0) {
			/* XXX should remember blocksize */
			used_vnode_size = (u_quad_t)pi.disklab->d_secsize *
			    (u_quad_t)pi.part->p_size;
		}
	} else {
		result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc);
		if (result == 0)
			used_vnode_size = vattr.va_size;
	}

	/* relock object */
	simple_lock(&uvn->u_obj.vmobjlock);

	if (result != 0) {
		if (uvn->u_flags & UVM_VNODE_WANTED)
			wakeup(uvn);
		uvn->u_flags = 0;
		simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */
		UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
		return(NULL);
	}

	/*
	 * make sure that the newsize fits within a vaddr_t
	 * XXX: need to revise addressing data types
	 */
	if (vp->v_type == VBLK)
		printf("used_vnode_size = %qu\n", used_vnode_size);
	if (used_vnode_size > (vaddr_t) -PAGE_SIZE) {
#ifdef DEBUG
		printf("uvn_attach: vn %p size truncated %qx->%x\n", vp,
		    used_vnode_size, -PAGE_SIZE);
#endif
		used_vnode_size = (vaddr_t) -PAGE_SIZE;
	}

	/*
	 * now set up the uvn.
	 */
	uvn->u_obj.pgops = &uvm_vnodeops;
	TAILQ_INIT(&uvn->u_obj.memq);
	uvn->u_obj.uo_npages = 0;
	uvn->u_obj.uo_refs = 1;			/* just us... */
	oldflags = uvn->u_flags;
	uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST;
	uvn->u_nio = 0;
	uvn->u_size = used_vnode_size;

	/* if write access, we need to add it to the wlist */
	if (accessprot & VM_PROT_WRITE) {
		simple_lock(&uvn_wl_lock);
		LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
		simple_unlock(&uvn_wl_lock);
		uvn->u_flags |= UVM_VNODE_WRITEABLE;	/* we are on wlist! */
	}

	/*
	 * add a reference to the vnode.  this reference will stay as long
	 * as there is a valid mapping of the vnode.  dropped when the
	 * reference count goes to zero [and we either free or persist].
	 */
	VREF(vp);
	simple_unlock(&uvn->u_obj.vmobjlock);
	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);

	UVMHIST_LOG(maphist,"<- done/VREF, ret 0x%x", &uvn->u_obj,0,0,0);
	return(&uvn->u_obj);
#endif
}


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed-in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */


static void
uvn_reference(uobj)
	struct uvm_object *uobj;
{
#ifdef UBC
#else
#ifdef DIAGNOSTIC
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
#endif
#endif
	UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist);

#ifdef UBC
	VREF((struct vnode *)uobj);
#else
	simple_lock(&uobj->vmobjlock);
#ifdef DIAGNOSTIC
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
		printf("uvn_reference: ref=%d, flags=0x%x\n",
		    uobj->uo_refs, uvn->u_flags);
		panic("uvn_reference: invalid state");
	}
#endif
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
#endif
}

/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 * => this starts the detach process, but doesn't have to finish it
 *	(async i/o could still be pending).
 */
static void
uvn_detach(uobj)
	struct uvm_object *uobj;
{
#ifdef UBC
#else
	struct uvm_vnode *uvn;
	struct vnode *vp;
	int oldflags;
#endif
	UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist);

#ifdef UBC
	vrele((struct vnode *)uobj);
#else
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;			/* drop ref! */
	if (uobj->uo_refs) {			/* still more refs */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * get other pointers ...
	 */

	uvn = (struct uvm_vnode *) uobj;
	vp = (struct vnode *) uobj;

	/*
	 * clear VTEXT flag now that there are no mappings left (VTEXT is used
	 * to keep an active text file from being overwritten).
	 */
	vp->v_flag &= ~VTEXT;

	/*
	 * we just dropped the last reference to the uvn.  see if we can
	 * let it "stick around".
	 */

	if (uvn->u_flags & UVM_VNODE_CANPERSIST) {
		/* won't block */
		uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES);
		simple_unlock(&uobj->vmobjlock);
		vrele(vp);			/* drop vnode reference */
		UVMHIST_LOG(maphist,"<- done/vrele!  (persist)", 0,0,0,0);
		return;
	}

	/*
	 * its a goner!
	 */

	UVMHIST_LOG(maphist,"  its a goner (flushing)!", 0,0,0,0);

	uvn->u_flags |= UVM_VNODE_DYING;

	/*
	 * even though we may unlock in flush, no one can gain a reference
	 * to us until we clear the "dying" flag [because it blocks
	 * attaches].  we will not do that until after we've disposed of all
	 * the pages with uvn_flush().  note that before the flush the only
	 * pages that could be marked PG_BUSY are ones that are in async
	 * pageout by the daemon.  (there can't be any pending "get"'s
	 * because there are no references to the object).
	 */

	(void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);

	UVMHIST_LOG(maphist,"  its a goner (done flush)!", 0,0,0,0);

	/*
	 * given the structure of this pager, the above flush request will
	 * create the following state: all the pages that were in the object
	 * have either been free'd or they are marked PG_BUSY|PG_RELEASED.
	 * the PG_BUSY bit was set either by us or the daemon for async I/O.
	 * in either case, if we have pages left we can't kill the object
	 * yet because i/o is pending.  in this case we set the "relkill"
	 * flag which will cause pgo_releasepg to kill the object once all
	 * the I/O's are done [pgo_releasepg will be called from the aiodone
	 * routine or from the page daemon].
	 */

	if (uobj->uo_npages) {		/* I/O pending.  iodone will free */
#ifdef DIAGNOSTIC
		/*
		 * XXXCDC: very unlikely to happen until we have async i/o
		 * so print a little info message in case it does.
		 */
		printf("uvn_detach: vn %p has pages left after flush - "
		    "relkill mode\n", uobj);
#endif
		uvn->u_flags |= UVM_VNODE_RELKILL;
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist,"<- done! (releasepg will kill obj)", 0, 0,
		    0, 0);
		return;
	}

	/*
	 * kill object now.  note that we can't be on the sync q because
	 * all references are gone.
	 */
	if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
		simple_lock(&uvn_wl_lock);	/* protect uvn_wlist */
		LIST_REMOVE(uvn, u_wlist);
		simple_unlock(&uvn_wl_lock);
	}
#ifdef DIAGNOSTIC
	if (uobj->memq.tqh_first != NULL)
		panic("uvn_detach: vnode VM object still has pages after "
		    "syncio/free flush");
#endif
	oldflags = uvn->u_flags;
	uvn->u_flags = 0;
	simple_unlock(&uobj->vmobjlock);

	/* wake up any sleepers */
	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);

	/*
	 * drop our reference to the vnode.
	 */
	vrele(vp);
	UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0);

	return;
#endif
}

/*
 * uvm_vnp_terminate: external hook to clear out a vnode's VM
 *
 * called in two cases:
 *  [1] when a persisting vnode vm object (i.e. one with a zero reference
 *	count) needs to be freed so that a vnode can be reused.  this
 *	happens under "getnewvnode" in vfs_subr.c.  if the vnode from
 *	the free list is still attached (i.e. not VBAD) then vgone is
 *	called.  as part of the vgone trace this should get called to
 *	free the vm object.  this is the common case.
 *  [2] when a filesystem is being unmounted by force (MNT_FORCE,
 *	"umount -f") the vgone() function is called on active vnodes
 *	on the mounted file systems to kill their data (the vnodes become
 *	"dead" ones [see src/sys/miscfs/deadfs/...]).  that results in a
 *	call here (even if the uvn is still in use -- i.e. has a non-zero
 *	reference count).  this case happens at "umount -f" and during a
 *	"reboot/halt" operation.
 *
 * => the caller must XLOCK and VOP_LOCK the vnode before calling us
 *	[protects us from getting a vnode that is already in the DYING
 *	 state...]
 * => unlike uvn_detach, this function must not return until all the
 *	uvn's pages are disposed of.
 * => in case [2] the uvn is still alive after this call, but all I/O
 *	ops will fail (due to the backing vnode now being "dead").  this
 *	will prob. kill any process using the uvn due to pgo_get failing.
 */

void
uvm_vnp_terminate(vp)
	struct vnode *vp;
{
	struct uvm_vnode *uvn = &vp->v_uvm;
#ifdef UBC
	if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
		simple_lock(&uvn_wl_lock);
		LIST_REMOVE(uvn, u_wlist);
		uvn->u_wlist.le_next = NULL;
		uvn->u_flags &= ~(UVM_VNODE_WRITEABLE);
		simple_unlock(&uvn_wl_lock);
	}
#else
	int oldflags;
	UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist);

	/*
	 * lock object and check if it is valid
	 */
	simple_lock(&uvn->u_obj.vmobjlock);
	UVMHIST_LOG(maphist, "  vp=0x%x, ref=%d, flag=0x%x", vp,
	    uvn->u_obj.uo_refs, uvn->u_flags, 0);
	if ((uvn->u_flags & UVM_VNODE_VALID) == 0) {
		simple_unlock(&uvn->u_obj.vmobjlock);
		UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0);
		return;
	}

	/*
	 * must be a valid uvn that is not already dying (because XLOCK
	 * protects us from that).  the uvn can't be in the ALOCK state
	 * because it is valid, and uvn's that are in the ALOCK state haven't
	 * been marked valid yet.
	 */

#ifdef DEBUG
	/*
	 * debug check: are we yanking the vnode out from under our uvn?
	 */
	if (uvn->u_obj.uo_refs) {
		printf("uvm_vnp_terminate(%p): terminating active vnode "
		    "(refs=%d)\n", uvn, uvn->u_obj.uo_refs);
	}
#endif

	/*
	 * it is possible that the uvn was detached and is in the relkill
	 * state [i.e. waiting for async i/o to finish so that releasepg can
	 * kill object].  we take over the vnode now and cancel the relkill.
	 * we want to know when the i/o is done so we can recycle right
	 * away.  note that a uvn can only be in the RELKILL state if it
	 * has a zero reference count.
	 */

	if (uvn->u_flags & UVM_VNODE_RELKILL)
		uvn->u_flags &= ~UVM_VNODE_RELKILL;	/* cancel RELKILL */

	/*
	 * block the uvn by setting the dying flag, and then flush the
	 * pages.  (note that flush may unlock object while doing I/O, but
	 * it will re-lock it before it returns control here).
	 *
	 * also, note that we tell I/O that we are already VOP_LOCK'd so
	 * that uvn_io doesn't attempt to VOP_LOCK again.
	 *
	 * XXXCDC: setting VNISLOCKED on an active uvn which is being
	 * terminated due to a forceful unmount might not be a good idea.
	 * maybe we need a way to pass in this info to uvn_flush through
	 * a pager-defined PGO_ constant [currently there are none].
	 */
	uvn->u_flags |= UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED;

	(void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES);

	/*
	 * as we just did a flush we expect all the pages to be gone or in
	 * the process of going.  sleep to wait for the rest to go [via iosync].
	 */

	while (uvn->u_obj.uo_npages) {
#ifdef DIAGNOSTIC
		struct vm_page *pp;
		for (pp = uvn->u_obj.memq.tqh_first ; pp != NULL ;
		    pp = pp->listq.tqe_next) {
			if ((pp->flags & PG_BUSY) == 0)
				panic("uvm_vnp_terminate: detected unbusy pg");
		}
		if (uvn->u_nio == 0)
			panic("uvm_vnp_terminate: no I/O to wait for?");
		printf("uvm_vnp_terminate: waiting for I/O to fin.\n");
		/*
		 * XXXCDC: this is unlikely to happen without async i/o so we
		 * put a printf in just to keep an eye on it.
		 */
#endif
		uvn->u_flags |= UVM_VNODE_IOSYNC;
		UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE,
		    "uvn_term",0);
		simple_lock(&uvn->u_obj.vmobjlock);
	}

	/*
	 * done.  now we free the uvn if its reference count is zero
	 * (true if we are zapping a persisting uvn).  however, if we are
	 * terminating a uvn with active mappings we let it live ... future
	 * calls down to the vnode layer will fail.
	 */

	oldflags = uvn->u_flags;
	if (uvn->u_obj.uo_refs) {

		/*
		 * uvn must live on in its dead-vnode state until all references
		 * are gone.  restore flags.  clear CANPERSIST state.
		 */

		uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED|
		    UVM_VNODE_WANTED|UVM_VNODE_CANPERSIST);

	} else {

		/*
		 * free the uvn now.  note that the VREF reference is already
		 * gone [it is dropped when we enter the persist state].
		 */
		if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED)
			panic("uvm_vnp_terminate: io sync wanted bit set");

		if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
			simple_lock(&uvn_wl_lock);
			LIST_REMOVE(uvn, u_wlist);
			simple_unlock(&uvn_wl_lock);
		}
		uvn->u_flags = 0;	/* uvn is history, clear all bits */
	}

	if (oldflags & UVM_VNODE_WANTED)
		wakeup(uvn);		/* object lock still held */

	simple_unlock(&uvn->u_obj.vmobjlock);
	UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
#endif
}

/*
 * uvn_releasepg: handle a released page in a uvn
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *	to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *	killed the page's object.  if we return TRUE, then we
 *	return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *	with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the uvn if it is not referenced and we are supposed to
 *	kill it ("relkill").
 */

boolean_t
uvn_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject;
#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uvn_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED]
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();

#ifdef UBC
	/* XXX I'm sure we need to do something here. */
	uvn = uvn;
#else
	/*
	 * now see if we need to kill the object
	 */
	if (uvn->u_flags & UVM_VNODE_RELKILL) {
		if (uvn->u_obj.uo_refs)
			panic("uvn_releasepg: kill flag set on referenced "
			    "object!");
		if (uvn->u_obj.uo_npages == 0) {
			if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
				simple_lock(&uvn_wl_lock);
				LIST_REMOVE(uvn, u_wlist);
				simple_unlock(&uvn_wl_lock);
			}
#ifdef DIAGNOSTIC
			if (uvn->u_obj.memq.tqh_first)
				panic("uvn_releasepg: pages in object with "
				    "npages == 0");
#endif
			if (uvn->u_flags & UVM_VNODE_WANTED)
				/* still holding object lock */
				wakeup(uvn);

			uvn->u_flags = 0;		/* DEAD! */
			simple_unlock(&uvn->u_obj.vmobjlock);
			return (FALSE);
		}
	}
#endif
	return (TRUE);
}

/*
 * NOTE: currently we have to use VOP_READ/VOP_WRITE because they go
 * through the buffer cache and allow I/O in any size.  These VOPs use
 * synchronous i/o.  [vs. VOP_STRATEGY which can be async, but doesn't
 * go through the buffer cache or allow I/O sizes larger than a
 * block].  we will eventually want to change this.
 *
 * issues to consider:
 *   uvm provides the uvm_aiodesc structure for async i/o management.
 * there are two tailq's in the uvm. structure... one for pending async
 * i/o and one for "done" async i/o.  to do an async i/o one puts
 * an aiodesc on the "pending" list (protected by splbio()), starts the
 * i/o and returns VM_PAGER_PEND.  when the i/o is done, we expect
 * some sort of "i/o done" function to be called (at splbio(), interrupt
 * time).  this function should remove the aiodesc from the pending list
 * and place it on the "done" list and wakeup the daemon.  the daemon
 * will run at normal spl() and will remove all items from the "done"
 * list and call the "aiodone" hook for each done request (see uvm_pager.c).
 * [in the old vm code, this was done by calling the "put" routine with
 * null arguments which made the code harder to read and understand because
 * you had one function ("put") doing two things.]
 *
 * so the current pager needs:
 *   int uvn_aiodone(struct uvm_aiodesc *)
 *
 * => return KERN_SUCCESS (aio finished, free it).  otherwise requeue for
 *	later collection.
 * => called with pageq's locked by the daemon.
 *
 * general outline:
 * - "try" to lock object.  if fail, just return (will try again later)
 * - drop "u_nio" (this req is done!)
 * - if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio }
 * - get "page" structures (atop?).
 * - handle "wanted" pages
 * - handle "released" pages [using pgo_releasepg]
 *   >>> pgo_releasepg may kill the object
 * don't forget to look at "object" wanted flag in all cases.
 */
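
#if 0
/*
 * a rough sketch (not compiled) of such an aiodone hook, following the
 * general outline above.  this is an illustration only: there is no
 * async i/o yet, the "struct uvm_aiodesc" field names used here
 * (pd_ptr, kva, npages) and the kva-to-vm_page lookup are assumptions,
 * and the error/cleanup paths are elided.
 */
static int
uvn_aiodone_sketch(aio)
	struct uvm_aiodesc *aio;
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) aio->pd_ptr; /* XXX */
	struct vm_page *pg;
	int lcv;

	/* "try" to lock the object.  if we fail, just return (try later) */
	if (simple_lock_try(&uvn->u_obj.vmobjlock) == 0)
		return(KERN_FAILURE);		/* requeue for later */

	/* this request is done: drop u_nio, wake any iosync waiter */
	uvn->u_nio--;
	if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0)
		wakeup(&uvn->u_nio);

	for (lcv = 0 ; lcv < aio->npages ; lcv++) {
		pg = uvm_atopg(aio->kva + (lcv << PAGE_SHIFT)); /* XXX assumed */

		/* handle "wanted" pages */
		if (pg->flags & PG_WANTED)
			wakeup(pg);
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);

		/* handle "released" pages [pgo_releasepg may kill object!] */
		if (pg->flags & PG_RELEASED) {
			if (!uvn_releasepg(pg, NULL))
				return(KERN_SUCCESS);	/* object died */
		}
	}

	/* look at the object wanted flag before we unlock */
	if (uvn->u_flags & UVM_VNODE_WANTED)
		wakeup(uvn);
	simple_unlock(&uvn->u_obj.vmobjlock);
	return(KERN_SUCCESS);
}
#endif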


/*
 * uvn_flush: flush pages out of a uvm object.
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	we return with the object locked.
 * => if PGO_CLEANIT is set, we may block (due to I/O).  thus, a caller
 *	might want to unlock higher level resources (e.g. vm_map)
 *	before calling flush.
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define UVN_HASH_PENALTY 4	/* XXX: a guess */
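
/*
 * example of the heuristic above with UVN_HASH_PENALTY == 4: flushing
 * a 16-page start->stop range of an object that holds 200 pages costs
 * 16 * 4 = 64 "penalized" hash lookups vs. a 200-entry list walk, so
 * we go by hash; if the object held only 32 pages the list walk (32)
 * would beat the penalized lookups (64), so we would go by list.
 */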

static boolean_t
uvn_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	vaddr_t start, stop;
	int flags;
{
	struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
	struct vm_page *pp, *ppnext, *ptmp;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages, result, lcv;
	boolean_t retval, need_iosync, by_list, needs_clean;
	vaddr_t curoff;
	u_short pp_version;
	UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist);

#ifdef UBC
	if (uvn->u_size == VSIZENOTSET) {
		void vp_name(void *);

		printf("uvn_flush: size not set vp %p\n", uvn);
		if ((flags & PGO_ALLPAGES) == 0)
			printf("... and PGO_ALLPAGES not set: "
			    "start 0x%lx end 0x%lx flags 0x%x\n",
			    start, stop, flags);
		vp_name(uvn);
		flags |= PGO_ALLPAGES;
	}
#if 0
	/* XXX unfortunately this is legitimate */
	if (flags & PGO_FREE && uobj->uo_refs) {
		printf("uvn_flush: PGO_FREE on ref'd vp %p\n", uobj);
		Debugger();
	}
#endif
#endif

	curoff = 0;	/* XXX: shut up gcc */
	/*
	 * get init vals and determine how we are going to traverse object
	 */

	need_iosync = FALSE;
	retval = TRUE;		/* return value */
	if (flags & PGO_ALLPAGES) {
		start = 0;
#ifdef UBC
		stop = -1;
#else
		stop = round_page(uvn->u_size);
#endif
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > round_page(uvn->u_size)) {
			printf("uvn_flush: out of range flush (fixed)\n");
			printf("  vp %p stop 0x%x\n", uvn, (int)stop);
		}

		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%x, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as
	 * a _hint_ as to how up to date the PG_CLEAN bit is.  if the hint
	 * is wrong it will only prevent us from clustering... it won't break
	 * anything.  we clear all PG_CLEANCHK bits here, and pgo_mk_pcluster
	 * will set them as it syncs PG_CLEAN.  This is only an issue if we
	 * are looking at non-inactive pages (because inactive page's PG_CLEAN
	 * bit is always up to date since there are no mappings).
	 * [borrowed PG_CLEANCHK idea from FreeBSD VM]
	 */

	if ((flags & PGO_CLEANIT) != 0 &&
	    uobj->pgops->pgo_mk_pcluster != NULL) {
		if (by_list) {
			for (pp = TAILQ_FIRST(&uobj->memq);
			    pp != NULL ;
			    pp = TAILQ_NEXT(pp, listq)) {
				if (pp->offset < start ||
				    (pp->offset >= stop && stop != -1))
					continue;
				pp->flags &= ~PG_CLEANCHK;
			}

		} else {	/* by hash */
			for (curoff = start ; curoff < stop;
			    curoff += PAGE_SIZE) {
				pp = uvm_pagelookup(uobj, curoff);
				if (pp)
					pp->flags &= ~PG_CLEANCHK;
			}
		}
	}

	/*
	 * now do it.  note: we must update ppnext in body of loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	ppsp = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {

		if (by_list) {

			/*
			 * range check
			 */

			if (pp->offset < start || pp->offset >= stop) {
				ppnext = TAILQ_NEXT(pp, listq);
				continue;
			}

		} else {

			/*
			 * null check
			 */

			curoff += PAGE_SIZE;
			if (pp == NULL) {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
				continue;
			}

		}

		/*
		 * handle case where we do not need to clean page (either
		 * because we are not cleaning, or because the page is not
		 * dirty, or because the page is busy):
		 *
		 * NOTE: we are allowed to deactivate a non-wired active
		 * PG_BUSY page, but once a PG_BUSY page is on the inactive
		 * queue it must stay put until it is !PG_BUSY (so as not to
		 * confuse pagedaemon).
		 */

		if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) {
			needs_clean = FALSE;
			if ((pp->flags & PG_BUSY) != 0 &&
			    (flags & (PGO_CLEANIT|PGO_SYNCIO)) ==
			    (PGO_CLEANIT|PGO_SYNCIO))
				need_iosync = TRUE;
		} else {
			/*
			 * freeing: nuke all mappings so we can sync
			 * PG_CLEAN bit with no race
			 */
			if ((pp->flags & PG_CLEAN) != 0 &&
			    (flags & PGO_FREE) != 0 &&
			    (pp->pqflags & PQ_ACTIVE) != 0)
				pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);
			if ((pp->flags & PG_CLEAN) != 0 &&
			    pmap_is_modified(PMAP_PGARG(pp)))
				pp->flags &= ~(PG_CLEAN);
			pp->flags |= PG_CLEANCHK;	/* update "hint" */

			needs_clean = ((pp->flags & PG_CLEAN) == 0);
		}

		/*
		 * if we don't need a clean... load ppnext and dispose of pp
		 */
		if (!needs_clean) {
			/* load ppnext */
			if (by_list)
				ppnext = pp->listq.tqe_next;
			else {
				if (curoff < stop)
					ppnext = uvm_pagelookup(uobj, curoff);
			}

			/* now dispose of pp */
			if (flags & PGO_DEACTIVATE) {
				if ((pp->pqflags & PQ_INACTIVE) == 0 &&
				    pp->wire_count == 0) {
					pmap_page_protect(PMAP_PGARG(pp),
					    VM_PROT_NONE);
					uvm_pagedeactivate(pp);
				}

			} else if (flags & PGO_FREE) {
				if (pp->flags & PG_BUSY) {
					/* release busy pages */
					pp->flags |= PG_RELEASED;
				} else {
					pmap_page_protect(PMAP_PGARG(pp),
					    VM_PROT_NONE);
					/* removed page from object */
					uvm_pagefree(pp);
				}
			}
			/* ppnext is valid so we can continue... */
			continue;
		}

		/*
		 * pp points to a page in the locked object that we are
		 * working on.  it is !PG_CLEAN and !PG_BUSY and we asked
		 * for cleaning (PGO_CLEANIT), so we clean it now.
		 *
		 * let uvm_pager_put attempt a clustered pageout.
		 * note: locked: uobj and page queues.
		 */

		pp->flags |= PG_BUSY;	/* we 'own' page now */
		UVM_PAGE_OWN(pp, "uvn_flush");
		pmap_page_protect(PMAP_PGARG(pp), VM_PROT_READ);
		pp_version = pp->version;
ReTry:
		ppsp = pps;
		npages = sizeof(pps) / sizeof(struct vm_page *);

		/* locked: page queues, uobj */
		result = uvm_pager_put(uobj, pp, &ppsp, &npages,
		    flags | PGO_DOACTCLUST, start, stop);
		/* unlocked: page queues, uobj */

		/*
		 * at this point nothing is locked.  if we did an async I/O
		 * it is remotely possible for the async i/o to complete and
		 * the page "pp" be freed or what not before we get a chance
		 * to relock the object.  in order to detect this, we have
		 * saved the version number of the page in "pp_version".
		 */

		/* relock! */
		simple_lock(&uobj->vmobjlock);
		uvm_lock_pageq();

		/*
		 * VM_PAGER_AGAIN: given the structure of this pager, this
		 * can only happen when we are doing async I/O and can't
		 * map the pages into kernel memory (pager_map) due to lack
		 * of vm space.  if this happens we drop back to sync I/O.
		 */

		if (result == VM_PAGER_AGAIN) {
			/*
			 * it is unlikely, but page could have been released
			 * while we had the object lock dropped.  we ignore
			 * this now and retry the I/O.  we will detect and
			 * handle the released page after the syncio I/O
			 * completes.
			 */
#ifdef DIAGNOSTIC
			if (flags & PGO_SYNCIO)
				panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)");
#endif
			flags |= PGO_SYNCIO;
			goto ReTry;
		}

		/*
		 * the cleaning operation is now done.  finish up.  note that
		 * on error (!OK, !PEND) uvm_pager_put drops the cluster for us.
		 * if success (OK, PEND) then uvm_pager_put returns the cluster
		 * to us in ppsp/npages.
		 */

		/*
		 * for pending async i/o if we are not deactivating/freeing
		 * we can move on to the next page.
		 */

		if (result == VM_PAGER_PEND) {

			if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
				/*
				 * no per-page ops: refresh ppnext and continue
				 */
				if (by_list) {
					if (pp->version == pp_version)
						ppnext = pp->listq.tqe_next;
					else
						/* reset */
						ppnext = uobj->memq.tqh_first;
				} else {
					if (curoff < stop)
						ppnext = uvm_pagelookup(uobj,
						    curoff);
				}
				continue;
			}

			/* need to do anything here? */
		}

		/*
		 * need to look at each page of the I/O operation.  we defer
		 * processing "pp" until the last trip through this "for" loop
		 * so that we can load "ppnext" for the main loop after we
		 * play with the cluster pages [thus the "npages + 1" in the
		 * loop below].
		 */

		for (lcv = 0 ; lcv < npages + 1 ; lcv++) {

			/*
			 * handle ppnext for outside loop, and saving pp
			 * until the end.
			 */
			if (lcv < npages) {
				if (ppsp[lcv] == pp)
					continue;	/* skip pp until the end */
				ptmp = ppsp[lcv];
			} else {
				ptmp = pp;

				/* set up next page for outer loop */
				if (by_list) {
					if (pp->version == pp_version)
						ppnext = pp->listq.tqe_next;
					else
						/* reset */
						ppnext = uobj->memq.tqh_first;
				} else {
					if (curoff < stop)
						ppnext = uvm_pagelookup(uobj,
						    curoff);
				}
			}

			/*
			 * verify the page didn't get moved while obj was
			 * unlocked
			 */
			if (result == VM_PAGER_PEND && ptmp->uobject != uobj)
				continue;

			/*
			 * unbusy the page if I/O is done.  note that for
			 * pending I/O it is possible that the I/O op
			 * finished before we relocked the object (in
			 * which case the page is no longer busy).
			 */

			if (result != VM_PAGER_PEND) {
				if (ptmp->flags & PG_WANTED)
					/* still holding object lock */
					thread_wakeup(ptmp);

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				if (ptmp->flags & PG_RELEASED) {

					/* pgo_releasepg wants this */
					uvm_unlock_pageq();
					if (!uvn_releasepg(ptmp, NULL))
						return (TRUE);

					uvm_lock_pageq();	/* relock */
					continue;		/* next page */

				} else {
					ptmp->flags |= (PG_CLEAN|PG_CLEANCHK);
					if ((flags & PGO_FREE) == 0)
						pmap_clear_modify(
						    PMAP_PGARG(ptmp));
				}
			}

			/*
			 * dispose of page
			 */

			if (flags & PGO_DEACTIVATE) {
				if ((ptmp->pqflags & PQ_INACTIVE) == 0 &&
				    ptmp->wire_count == 0) {
					pmap_page_protect(PMAP_PGARG(ptmp),
					    VM_PROT_NONE);
					uvm_pagedeactivate(ptmp);
				}

			} else if (flags & PGO_FREE) {
				if (result == VM_PAGER_PEND) {
					if ((ptmp->flags & PG_BUSY) != 0)
						/* signal for i/o done */
						ptmp->flags |= PG_RELEASED;
				} else {
					if (result != VM_PAGER_OK) {
						printf("uvn_flush: obj=%p, "
						    "offset=0x%lx.  error %d\n",
						    pp->uobject, pp->offset,
						    result);
						printf("uvn_flush: WARNING: "
						    "changes to page may be "
						    "lost!\n");
						retval = FALSE;
					}
					pmap_page_protect(PMAP_PGARG(ptmp),
					    VM_PROT_NONE);
					uvm_pagefree(ptmp);
				}
			}

		}	/* end of "lcv" for loop */

	}	/* end of "pp" for loop */

	/*
	 * done with pagequeues: unlock
	 */
	uvm_unlock_pageq();

	/*
	 * now wait for all I/O if required.
	 */
#ifdef UBC
	/*
	 * XXX currently not needed since all i/o is sync.
	 * merge this with VBWAIT.
	 */
#else
	if (need_iosync) {

		UVMHIST_LOG(maphist,"  <<DOING IOSYNC>>",0,0,0,0);
		while (uvn->u_nio != 0) {
			uvn->u_flags |= UVM_VNODE_IOSYNC;
			UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock,
			    FALSE, "uvn_flush",0);
			simple_lock(&uvn->u_obj.vmobjlock);
		}
		if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED)
			wakeup(&uvn->u_flags);
		uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED);
	}
#endif

	/* return, with object locked! */
	UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0);
	return(retval);
}

/*
 * uvn_cluster
 *
 * we are about to do I/O in an object at offset.  this function is called
 * to establish a range of offsets around "offset" in which we can cluster
 * I/O.
 *
 * - currently doesn't matter if obj locked or not.
 */
1420 1.1 mrg
1421 1.8 mrg static void
1422 1.8 mrg uvn_cluster(uobj, offset, loffset, hoffset)
1423 1.8 mrg struct uvm_object *uobj;
1424 1.15 eeh vaddr_t offset;
1425 1.15 eeh vaddr_t *loffset, *hoffset; /* OUT */
1426 1.1 mrg {
1427 1.8 mrg struct uvm_vnode *uvn = (struct uvm_vnode *) uobj;
1428 1.8 mrg *loffset = offset;
1429 1.1 mrg
1430 1.8 mrg if (*loffset >= uvn->u_size)
1431 1.17.2.1 chs #ifdef UBC
1432 1.17.2.1 chs {
1433 1.17.2.1 chs /* XXX nfs writes cause trouble with this */
1434 1.17.2.1 chs *loffset = *hoffset = offset;
1435 1.17.2.1 chs printf("uvn_cluster: offset out of range: vp %p loffset 0x%x\n",
1436 1.17.2.1 chs uobj, (int) *loffset);
1437 1.17.2.1 chs return;
1438 1.17.2.1 chs }
1439 1.17.2.1 chs #else
1440 1.17.2.1 chs panic("uvn_cluster: offset out of range: vp %p loffset 0x%x",
1441 1.17.2.1 chs uobj, (int) *loffset);
1442 1.17.2.1 chs #endif
1443 1.1 mrg
1444 1.8 mrg /*
1445 1.8 mrg * XXX: old pager claims we could use VOP_BMAP to get maxcontig value.
1446 1.8 mrg */
1447 1.8 mrg *hoffset = *loffset + MAXBSIZE;
1448 1.8 mrg if (*hoffset > round_page(uvn->u_size)) /* past end? */
1449 1.8 mrg *hoffset = round_page(uvn->u_size);
1450 1.1 mrg
1451 1.8 mrg return;
1452 1.1 mrg }
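
/*
 * worked example of the arithmetic above (illustrative values only,
 * assuming PAGE_SIZE 0x1000 and MAXBSIZE 0x10000): for offset 0x1000
 * on a vnode with u_size 0x4800, *loffset becomes 0x1000 and the
 * tentative *hoffset of 0x11000 is clipped to round_page(0x4800) =
 * 0x5000, so i/o may cluster the pages covering [0x1000, 0x5000).
 */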
1453 1.1 mrg
1454 1.1 mrg /*
1455 1.1 mrg * uvn_put: flush page data to backing store.
1456 1.1 mrg *
1457 1.1 mrg * => prefer map unlocked (not required)
1458 1.1 mrg * => object must be locked! we will _unlock_ it before starting I/O.
1459 1.1 mrg * => flags: PGO_SYNCIO -- use sync. I/O
1460 1.1 mrg * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
1461 1.1 mrg  * => XXX: currently this goes through VOP_PUTPAGES, which is only sync.
1462 1.1 mrg * [thus we never do async i/o! see iodone comment]
1463 1.1 mrg */
1464 1.1 mrg
1465 1.8 mrg static int
1466 1.8 mrg uvn_put(uobj, pps, npages, flags)
1467 1.8 mrg struct uvm_object *uobj;
1468 1.8 mrg struct vm_page **pps;
1469 1.8 mrg int npages, flags;
1470 1.1 mrg {
1471 1.8 mrg int retval;
1472 1.1 mrg
1473 1.8 mrg /* note: object locked */
1474 1.17.2.1 chs simple_lock_assert(&uobj->vmobjlock, 1);
1475 1.17.2.1 chs retval = VOP_PUTPAGES((struct vnode *)uobj, pps, npages, 1, &retval);
1476 1.8 mrg /* note: object unlocked */
1477 1.17.2.1 chs simple_lock_assert(&uobj->vmobjlock, 0);
1478 1.1 mrg
1479 1.8 mrg return(retval);
1480 1.1 mrg }
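
/*
 * sketch of the locking protocol a caller of uvn_put is expected to
 * follow (hypothetical caller, for illustration only):
 *
 *	simple_lock(&uobj->vmobjlock);
 *	... collect busy, dirty pages into pps[0..npages-1] ...
 *	result = uvn_put(uobj, pps, npages, PGO_SYNCIO);
 *	... uobj is now unlocked: relock it before unbusying the pages ...
 */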
1481 1.1 mrg
1482 1.1 mrg
1483 1.1 mrg /*
1484 1.1 mrg * uvn_get: get pages (synchronously) from backing store
1485 1.1 mrg *
1486 1.1 mrg * => prefer map unlocked (not required)
1487 1.1 mrg * => object must be locked! we will _unlock_ it before starting any I/O.
1488 1.1 mrg * => flags: PGO_ALLPAGES: get all of the pages
1489 1.1 mrg * PGO_LOCKED: fault data structures are locked
1490 1.1 mrg * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
1491 1.1 mrg * => NOTE: caller must check for released pages!!
1492 1.1 mrg */
1493 1.1 mrg
1494 1.8 mrg static int
1495 1.8 mrg uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
1496 1.8 mrg struct uvm_object *uobj;
1497 1.15 eeh vaddr_t offset;
1498 1.8 mrg struct vm_page **pps; /* IN/OUT */
1499 1.8 mrg int *npagesp; /* IN (OUT if PGO_LOCKED) */
1500 1.8 mrg int centeridx, advice, flags;
1501 1.8 mrg vm_prot_t access_type;
1502 1.8 mrg {
1503 1.17.2.1 chs struct vnode *vp = (struct vnode *)uobj;
1504 1.17.2.1 chs int error;
1505 1.8 mrg
1506 1.17.2.1 chs simple_lock_assert(&uobj->vmobjlock, 1);
1507 1.17.2.1 chs error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
1508 1.17.2.1 chs access_type, advice, flags);
1509 1.17.2.1 chs simple_lock_assert(&uobj->vmobjlock, flags & PGO_LOCKED ? 1 : 0);
1510 1.8 mrg
1511 1.17.2.1 chs return error ? VM_PAGER_ERROR : VM_PAGER_OK;
1512 1.17.2.1 chs }
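
/*
 * note on the asserts above: with PGO_LOCKED the caller holds fault
 * data structure locks, so VOP_GETPAGES must return with the object
 * still locked; without PGO_LOCKED the object is unlocked for the
 * duration of any i/o, mirroring uvn_put.
 */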
1513 1.8 mrg
1514 1.17.2.1 chs /*
1515 1.17.2.1 chs * uvn_findpage:
1516 1.17.2.1 chs * return the page for the uobj and offset requested, allocating if needed.
1517 1.17.2.1 chs * => uobj must be locked.
1518 1.17.2.1 chs * => returned page will be BUSY.
1519 1.17.2.1 chs */
1520 1.8 mrg
1521 1.17.2.1 chs void
1522 1.17.2.1 chs uvn_findpage(uobj, offset, pps)
1523 1.17.2.1 chs struct uvm_object *uobj;
1524 1.17.2.1 chs vaddr_t offset;
1525 1.17.2.1 chs struct vm_page **pps;
1526 1.17.2.1 chs {
1527 1.17.2.1 chs struct vm_page *ptmp;
1528 1.17.2.1 chs UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(maphist);
1529 1.8 mrg
1530 1.17.2.1 chs for (;;) {
1531 1.17.2.1 chs /* look for a current page */
1532 1.17.2.1 chs ptmp = uvm_pagelookup(uobj, offset);
1533 1.17.2.1 chs
1534 1.17.2.1 chs /* nope? allocate one now */
1535 1.17.2.1 chs if (ptmp == NULL) {
1536 1.17.2.1 chs ptmp = uvm_pagealloc(uobj, offset, NULL);
1537 1.17.2.1 chs if (ptmp == NULL) {
1538 1.17.2.1 chs simple_unlock(&uobj->vmobjlock);
1539 1.17.2.1 chs uvm_wait("uvn_fp1");
1540 1.17.2.1 chs simple_lock(&uobj->vmobjlock);
1541 1.8 mrg continue;
1542 1.8 mrg }
1543 1.8 mrg
1544 1.8 mrg /*
1545 1.17.2.1 chs * XXX for now, always zero new pages.
1546 1.8 mrg */
1547 1.17.2.1 chs pmap_zero_page(VM_PAGE_TO_PHYS(ptmp));
1548 1.1 mrg
1549 1.17.2.1 chs break;
1550 1.8 mrg }
1551 1.8 mrg
1552 1.17.2.1 chs /* page is there, see if we need to wait on it */
1553 1.17.2.1 chs if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
1554 1.17.2.1 chs ptmp->flags |= PG_WANTED;
1555 1.17.2.1 chs UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
1556 1.17.2.1 chs "uvn_fp2",0);
1557 1.17.2.1 chs simple_lock(&uobj->vmobjlock);
1558 1.17.2.1 chs continue;
1559 1.8 mrg }
1560 1.17.2.1 chs
1561 1.17.2.1 chs /* BUSY the page and we're done. */
1562 1.17.2.1 chs ptmp->flags |= PG_BUSY;
1563 1.17.2.1 chs UVM_PAGE_OWN(ptmp, "uvn_findpage");
1564 1.17.2.1 chs break;
1565 1.17.2.1 chs }
1566 1.17.2.1 chs *pps = ptmp;
1567 1.1 mrg }
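
/*
 * typical use of uvn_findpage (hypothetical caller, for illustration;
 * the unbusy sequence mirrors the one in uvn_flush above):
 *
 *	struct vm_page *pg;
 *
 *	simple_lock(&uobj->vmobjlock);
 *	uvn_findpage(uobj, offset, &pg);
 *	... pg is busy: fill it or write it out ...
 *	if (pg->flags & PG_WANTED)
 *		wakeup(pg);
 *	pg->flags &= ~(PG_WANTED|PG_BUSY);
 *	UVM_PAGE_OWN(pg, NULL);
 *	simple_unlock(&uobj->vmobjlock);
 */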
1568 1.1 mrg
1569 1.1 mrg /*
1570 1.1 mrg * uvn_asyncget: start async I/O to bring pages into ram
1571 1.1 mrg *
1572 1.1 mrg  * => caller must lock object (???XXX: see if this is best)
1573 1.1 mrg * => could be called from uvn_get or a madvise() fault-ahead.
1574 1.1 mrg * => if it fails, it doesn't matter.
1575 1.1 mrg */
1576 1.1 mrg
1577 1.8 mrg static int
1578 1.8 mrg uvn_asyncget(uobj, offset, npages)
1579 1.8 mrg struct uvm_object *uobj;
1580 1.15 eeh vaddr_t offset;
1581 1.8 mrg int npages;
1582 1.8 mrg {
1583 1.1 mrg
1584 1.8 mrg /*
1585 1.8 mrg * XXXCDC: we can't do async I/O yet
1586 1.8 mrg */
1587 1.8 mrg printf("uvn_asyncget called\n");
1588 1.8 mrg return (KERN_SUCCESS);
1589 1.1 mrg }
1590 1.1 mrg
1591 1.1 mrg /*
1592 1.1 mrg * uvm_vnp_uncache: disable "persisting" in a vnode... when last reference
1593 1.1 mrg * is gone we will kill the object (flushing dirty pages back to the vnode
1594 1.1 mrg * if needed).
1595 1.1 mrg *
1596 1.1 mrg * => returns TRUE if there was no uvm_object attached or if there was
1597 1.1 mrg * one and we killed it [i.e. if there is no active uvn]
1598 1.1 mrg * => called with the vnode VOP_LOCK'd [we will unlock it for I/O, if
1599 1.1 mrg * needed]
1600 1.1 mrg *
1601 1.1 mrg * => XXX: given that we now kill uvn's when a vnode is recycled (without
1602 1.1 mrg * having to hold a reference on the vnode) and given a working
1603 1.1 mrg  *	uvm_vnp_sync(), how does that affect the need for this function?
1604 1.1 mrg * [XXXCDC: seems like it can die?]
1605 1.1 mrg *
1606 1.1 mrg * => XXX: this function should DIE once we merge the VM and buffer
1607 1.1 mrg * cache.
1608 1.1 mrg *
1609 1.1 mrg * research shows that this is called in the following places:
1610 1.1 mrg * ext2fs_truncate, ffs_truncate, detrunc[msdosfs]: called when vnode
1611 1.1 mrg * changes sizes
1612 1.1 mrg * ext2fs_write, WRITE [ufs_readwrite], msdosfs_write: called when we
1613 1.1 mrg * are written to
1614 1.1 mrg  *   ext2fs_chmod, ufs_chmod: called if VTEXT vnode and the sticky bit
1615 1.1 mrg * is off
1616 1.1 mrg * ffs_realloccg: when we can't extend the current block and have
1617 1.1 mrg * to allocate a new one we call this [XXX: why?]
1618 1.1 mrg * nfsrv_rename, rename_files: called when the target filename is there
1619 1.1 mrg * and we want to remove it
1620 1.1 mrg * nfsrv_remove, sys_unlink: called on file we are removing
1621 1.1 mrg * nfsrv_access: if VTEXT and we want WRITE access and we don't uncache
1622 1.1 mrg * then return "text busy"
1623 1.1 mrg * nfs_open: seems to uncache any file opened with nfs
1624 1.1 mrg * vn_writechk: if VTEXT vnode and can't uncache return "text busy"
1625 1.1 mrg */
1626 1.1 mrg
1627 1.8 mrg boolean_t
1628 1.8 mrg uvm_vnp_uncache(vp)
1629 1.8 mrg struct vnode *vp;
1630 1.8 mrg {
1631 1.17.2.1 chs #ifdef UBC
1632 1.17.2.1 chs #else
1633 1.8 mrg struct uvm_vnode *uvn = &vp->v_uvm;
1634 1.1 mrg
1635 1.8 mrg /*
1636 1.8 mrg 	 * lock the uvn part of the vnode and check whether we need to do anything
1637 1.8 mrg */
1638 1.1 mrg
1639 1.8 mrg simple_lock(&uvn->u_obj.vmobjlock);
1640 1.8 mrg if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
1641 1.8 mrg (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
1642 1.8 mrg simple_unlock(&uvn->u_obj.vmobjlock);
1643 1.8 mrg return(TRUE);
1644 1.8 mrg }
1645 1.8 mrg
1646 1.8 mrg /*
1647 1.8 mrg * we have a valid, non-blocked uvn. clear persist flag.
1648 1.8 mrg * if uvn is currently active we can return now.
1649 1.8 mrg */
1650 1.8 mrg
1651 1.8 mrg uvn->u_flags &= ~UVM_VNODE_CANPERSIST;
1652 1.8 mrg if (uvn->u_obj.uo_refs) {
1653 1.8 mrg simple_unlock(&uvn->u_obj.vmobjlock);
1654 1.8 mrg return(FALSE);
1655 1.8 mrg }
1656 1.8 mrg
1657 1.8 mrg /*
1658 1.8 mrg * uvn is currently persisting! we have to gain a reference to
1659 1.8 mrg * it so that we can call uvn_detach to kill the uvn.
1660 1.8 mrg */
1661 1.1 mrg
1662 1.8 mrg VREF(vp); /* seems ok, even with VOP_LOCK */
1663 1.8 mrg uvn->u_obj.uo_refs++; /* value is now 1 */
1664 1.8 mrg simple_unlock(&uvn->u_obj.vmobjlock);
1665 1.1 mrg
1666 1.1 mrg
1667 1.1 mrg #ifdef DEBUG
1668 1.8 mrg /*
1669 1.8 mrg * carry over sanity check from old vnode pager: the vnode should
1670 1.8 mrg * be VOP_LOCK'd, and we confirm it here.
1671 1.8 mrg */
1672 1.8 mrg if (!VOP_ISLOCKED(vp)) {
1673 1.8 mrg boolean_t is_ok_anyway = FALSE;
1674 1.1 mrg #ifdef NFS
1675 1.8 mrg extern int (**nfsv2_vnodeop_p) __P((void *));
1676 1.8 mrg extern int (**spec_nfsv2nodeop_p) __P((void *));
1677 1.8 mrg extern int (**fifo_nfsv2nodeop_p) __P((void *));
1678 1.1 mrg
1679 1.8 mrg /* vnode is NOT VOP_LOCKed: some vnode types _never_ lock */
1680 1.8 mrg if (vp->v_op == nfsv2_vnodeop_p ||
1681 1.8 mrg vp->v_op == spec_nfsv2nodeop_p) {
1682 1.8 mrg is_ok_anyway = TRUE;
1683 1.8 mrg }
1684 1.8 mrg if (vp->v_op == fifo_nfsv2nodeop_p) {
1685 1.8 mrg is_ok_anyway = TRUE;
1686 1.8 mrg }
1687 1.1 mrg #endif /* NFS */
1688 1.8 mrg if (!is_ok_anyway)
1689 1.8 mrg panic("uvm_vnp_uncache: vnode not locked!");
1690 1.8 mrg }
1691 1.1 mrg #endif /* DEBUG */
1692 1.1 mrg
1693 1.8 mrg /*
1694 1.8 mrg * now drop our reference to the vnode. if we have the sole
1695 1.8 mrg * reference to the vnode then this will cause it to die [as we
1696 1.8 mrg * just cleared the persist flag]. we have to unlock the vnode
1697 1.8 mrg * while we are doing this as it may trigger I/O.
1698 1.8 mrg *
1699 1.8 mrg * XXX: it might be possible for uvn to get reclaimed while we are
1700 1.8 mrg * unlocked causing us to return TRUE when we should not. we ignore
1701 1.8 mrg 	 * this, as a false-positive return value doesn't hurt us.
1702 1.8 mrg */
1703 1.8 mrg VOP_UNLOCK(vp, 0);
1704 1.8 mrg uvn_detach(&uvn->u_obj);
1705 1.8 mrg vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1706 1.8 mrg
1707 1.8 mrg /*
1708 1.8 mrg * and return...
1709 1.8 mrg */
1710 1.17.2.1 chs #endif
1711 1.8 mrg return(TRUE);
1712 1.1 mrg }
1713 1.1 mrg
1714 1.1 mrg /*
1715 1.1 mrg * uvm_vnp_setsize: grow or shrink a vnode uvn
1716 1.1 mrg *
1717 1.1 mrg * grow => just update size value
1718 1.1 mrg * shrink => toss un-needed pages
1719 1.1 mrg *
1720 1.1 mrg * => we assume that the caller has a reference of some sort to the
1721 1.1 mrg * vnode in question so that it will not be yanked out from under
1722 1.1 mrg * us.
1723 1.1 mrg *
1724 1.1 mrg * called from:
1725 1.1 mrg * => truncate fns (ext2fs_truncate, ffs_truncate, detrunc[msdos])
1726 1.1 mrg * => "write" fns (ext2fs_write, WRITE [ufs/ufs], msdosfs_write, nfs_write)
1727 1.1 mrg * => ffs_balloc [XXX: why? doesn't WRITE handle?]
1728 1.1 mrg * => NFS: nfs_loadattrcache, nfs_getattrcache, nfs_setattr
1729 1.1 mrg * => union fs: union_newsize
1730 1.1 mrg */
1731 1.1 mrg
1732 1.8 mrg void
1733 1.8 mrg uvm_vnp_setsize(vp, newsize)
1734 1.8 mrg struct vnode *vp;
1735 1.8 mrg u_quad_t newsize;
1736 1.8 mrg {
1737 1.8 mrg struct uvm_vnode *uvn = &vp->v_uvm;
1738 1.1 mrg
1739 1.8 mrg /*
1740 1.8 mrg * lock uvn and check for valid object, and if valid: do it!
1741 1.8 mrg */
1742 1.8 mrg simple_lock(&uvn->u_obj.vmobjlock);
1743 1.17.2.1 chs #ifdef UBC
1744 1.17.2.1 chs #else
1745 1.8 mrg if (uvn->u_flags & UVM_VNODE_VALID) {
1746 1.17.2.1 chs #endif
1747 1.8 mrg /*
1748 1.15 eeh * make sure that the newsize fits within a vaddr_t
1749 1.8 mrg * XXX: need to revise addressing data types
1750 1.8 mrg */
1751 1.1 mrg
1752 1.15 eeh if (newsize > (vaddr_t) -PAGE_SIZE) {
1753 1.1 mrg #ifdef DEBUG
1754 1.8 mrg printf("uvm_vnp_setsize: vn %p size truncated "
1755 1.15 eeh "%qx->%lx\n", vp, newsize, (vaddr_t)-PAGE_SIZE);
1756 1.1 mrg #endif
1757 1.15 eeh newsize = (vaddr_t)-PAGE_SIZE;
1758 1.8 mrg }
1759 1.8 mrg
1760 1.8 mrg /*
1761 1.8 mrg * now check if the size has changed: if we shrink we had better
1762 1.8 mrg * toss some pages...
1763 1.8 mrg */
1764 1.8 mrg
1765 1.17.2.1 chs #ifdef UBC
1766 1.17.2.1 chs if (uvn->u_size > newsize && uvn->u_size != VSIZENOTSET) {
1767 1.17.2.1 chs #else
1769 1.8 mrg 	if (uvn->u_size > newsize) {
1771 1.17.2.1 chs #endif
1772 1.17.2.1 chs (void)uvn_flush(&uvn->u_obj, (vaddr_t)newsize,
1773 1.17.2.1 chs uvn->u_size, PGO_FREE);
1774 1.8 mrg }
1775 1.17.2.1 chs #ifdef DEBUGxx
1776 1.17.2.1 chs 	printf("uvm_vnp_setsize: vp %p newsize 0x%x\n", vp, (int)newsize);
1778 1.17.2.1 chs #endif
1777 1.15 eeh 	uvn->u_size = (vaddr_t)newsize;
1779 1.17.2.1 chs #ifdef UBC
1780 1.17.2.1 chs #else
1781 1.8 mrg }
1782 1.17.2.1 chs #endif
1783 1.8 mrg simple_unlock(&uvn->u_obj.vmobjlock);
1784 1.1 mrg }
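
/*
 * e.g. (illustrative values, PAGE_SIZE 0x1000): truncating a vnode
 * from u_size 0x3000 to newsize 0x1000 flushes [0x1000, 0x3000) with
 * PGO_FREE, tossing the two pages past the new end of file, and then
 * records the new size; growing a vnode just records the new size.
 */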
1785 1.1 mrg
1786 1.1 mrg /*
1787 1.1 mrg * uvm_vnp_sync: flush all dirty VM pages back to their backing vnodes.
1788 1.1 mrg *
1789 1.1 mrg * => called from sys_sync with no VM structures locked
1790 1.1 mrg * => only one process can do a sync at a time (because the uvn
1791 1.1 mrg * structure only has one queue for sync'ing). we ensure this
1792 1.1 mrg * by holding the uvn_sync_lock while the sync is in progress.
1793 1.1 mrg * other processes attempting a sync will sleep on this lock
1794 1.1 mrg * until we are done.
1795 1.1 mrg */
1796 1.1 mrg
1797 1.8 mrg void
1798 1.8 mrg uvm_vnp_sync(mp)
1799 1.8 mrg struct mount *mp;
1800 1.8 mrg {
1801 1.8 mrg struct uvm_vnode *uvn;
1802 1.8 mrg struct vnode *vp;
1803 1.8 mrg boolean_t got_lock;
1804 1.8 mrg
1805 1.8 mrg /*
1806 1.8 mrg * step 1: ensure we are only ones using the uvn_sync_q by locking
1807 1.8 mrg * our lock...
1808 1.8 mrg */
1809 1.8 mrg lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, (void *)0);
1810 1.8 mrg
1811 1.8 mrg /*
1812 1.8 mrg * step 2: build up a simpleq of uvns of interest based on the
1813 1.8 mrg * write list. we gain a reference to uvns of interest. must
1814 1.8 mrg * be careful about locking uvn's since we will be holding uvn_wl_lock
1815 1.8 mrg * in the body of the loop.
1816 1.8 mrg */
1817 1.8 mrg SIMPLEQ_INIT(&uvn_sync_q);
1818 1.8 mrg simple_lock(&uvn_wl_lock);
1819 1.8 mrg for (uvn = uvn_wlist.lh_first ; uvn != NULL ;
1820 1.8 mrg uvn = uvn->u_wlist.le_next) {
1821 1.1 mrg
1822 1.8 mrg vp = (struct vnode *) uvn;
1823 1.8 mrg if (mp && vp->v_mount != mp)
1824 1.8 mrg continue;
1825 1.8 mrg
1826 1.8 mrg /* attempt to gain reference */
1827 1.8 mrg while ((got_lock = simple_lock_try(&uvn->u_obj.vmobjlock)) ==
1828 1.9 chuck FALSE &&
1829 1.17.2.1 chs (uvn->u_flags & UVM_VNODE_BLOCKED) == 0)
1830 1.8 mrg /* spin */ ;
1831 1.8 mrg
1832 1.8 mrg /*
1833 1.9 chuck 	 * we will exit the loop if either of the following is true:
1834 1.9 chuck * - we got the lock [always true if NCPU == 1]
1835 1.9 chuck * - we failed to get the lock but noticed the vnode was
1836 1.9 chuck * "blocked" -- in this case the vnode must be a dying
1837 1.9 chuck * vnode, and since dying vnodes are in the process of
1838 1.9 chuck * being flushed out, we can safely skip this one
1839 1.9 chuck *
1840 1.9 chuck * we want to skip over the vnode if we did not get the lock,
1841 1.9 chuck * or if the vnode is already dying (due to the above logic).
1842 1.8 mrg *
1843 1.8 mrg * note that uvn must already be valid because we found it on
1844 1.8 mrg * the wlist (this also means it can't be ALOCK'd).
1845 1.8 mrg */
1846 1.9 chuck if (!got_lock || (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
1847 1.9 chuck if (got_lock)
1848 1.9 chuck simple_unlock(&uvn->u_obj.vmobjlock);
1849 1.9 chuck continue; /* skip it */
1850 1.9 chuck }
1851 1.8 mrg
1852 1.8 mrg /*
1853 1.8 mrg * gain reference. watch out for persisting uvns (need to
1854 1.8 mrg * regain vnode REF).
1855 1.8 mrg */
1856 1.17.2.1 chs #ifdef UBC
1857 1.17.2.1 chs /* XXX should be using a vref-like function here */
1858 1.17.2.1 chs #else
1859 1.8 mrg if (uvn->u_obj.uo_refs == 0)
1860 1.8 mrg VREF(vp);
1861 1.17.2.1 chs #endif
1862 1.8 mrg uvn->u_obj.uo_refs++;
1863 1.8 mrg simple_unlock(&uvn->u_obj.vmobjlock);
1864 1.8 mrg
1865 1.8 mrg /*
1866 1.8 mrg * got it!
1867 1.8 mrg */
1868 1.8 mrg SIMPLEQ_INSERT_HEAD(&uvn_sync_q, uvn, u_syncq);
1869 1.8 mrg }
1870 1.8 mrg simple_unlock(&uvn_wl_lock);
1871 1.1 mrg
1872 1.8 mrg /*
1873 1.8 mrg * step 3: we now have a list of uvn's that may need cleaning.
1874 1.8 mrg * we are holding the uvn_sync_lock, but have dropped the uvn_wl_lock
1875 1.8 mrg * (so we can now safely lock uvn's again).
1876 1.8 mrg */
1877 1.1 mrg
1878 1.8 mrg for (uvn = uvn_sync_q.sqh_first ; uvn ; uvn = uvn->u_syncq.sqe_next) {
1879 1.8 mrg simple_lock(&uvn->u_obj.vmobjlock);
1880 1.17.2.1 chs #ifdef UBC
1881 1.17.2.1 chs #else
1882 1.1 mrg #ifdef DIAGNOSTIC
1883 1.8 mrg if (uvn->u_flags & UVM_VNODE_DYING) {
1884 1.8 mrg printf("uvm_vnp_sync: dying vnode on sync list\n");
1885 1.8 mrg }
1886 1.1 mrg #endif
1887 1.17.2.1 chs #endif
1888 1.8 mrg uvn_flush(&uvn->u_obj, 0, 0,
1889 1.8 mrg PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST);
1890 1.8 mrg
1891 1.8 mrg /*
1892 1.8 mrg * if we have the only reference and we just cleaned the uvn,
1893 1.8 mrg * then we can pull it out of the UVM_VNODE_WRITEABLE state
1894 1.8 mrg * thus allowing us to avoid thinking about flushing it again
1895 1.8 mrg * on later sync ops.
1896 1.8 mrg */
1897 1.8 mrg if (uvn->u_obj.uo_refs == 1 &&
1898 1.8 mrg (uvn->u_flags & UVM_VNODE_WRITEABLE)) {
1899 1.8 mrg LIST_REMOVE(uvn, u_wlist);
1900 1.8 mrg uvn->u_flags &= ~UVM_VNODE_WRITEABLE;
1901 1.8 mrg }
1902 1.8 mrg
1903 1.8 mrg simple_unlock(&uvn->u_obj.vmobjlock);
1904 1.1 mrg
1905 1.8 mrg /* now drop our reference to the uvn */
1906 1.8 mrg uvn_detach(&uvn->u_obj);
1907 1.8 mrg }
1908 1.8 mrg
1909 1.8 mrg /*
1910 1.8 mrg * done! release sync lock
1911 1.8 mrg */
1912 1.8 mrg lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0);
1913 1.17.2.1 chs }
1914 1.17.2.1 chs
1915 1.17.2.1 chs
1916 1.17.2.1 chs /*
1917 1.17.2.1 chs * uvm_vnp_relocate: update pages' blknos
1918 1.17.2.1 chs */
1919 1.17.2.1 chs
1920 1.17.2.1 chs int
1921 1.17.2.1 chs uvm_vnp_relocate(vp, off, len, blkno)
1922 1.17.2.1 chs struct vnode *vp;
1923 1.17.2.1 chs vaddr_t off;
1924 1.17.2.1 chs vsize_t len;
1925 1.17.2.1 chs daddr_t blkno;
1926 1.17.2.1 chs {
1927 1.17.2.1 chs int npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1928 1.17.2.1 chs struct vm_page *pgs[npages], *pg;
1929 1.17.2.1 chs int i, rv;
1930 1.17.2.1 chs
1931 1.17.2.1 chs printf("relocate: vp %p off 0x%lx npages 0x%x blkno 0x%x\n",
1932 1.17.2.1 chs vp, off, npages, blkno);
1933 1.17.2.1 chs
1934 1.17.2.1 chs #ifdef DIAGNOSTIC
1935 1.17.2.1 chs if (off & (PAGE_SIZE - 1)) {
1936 1.17.2.1 chs panic("uvm_vnp_relocate: vp %p bad off 0x%lx", vp, off);
1937 1.17.2.1 chs }
1938 1.17.2.1 chs #endif
1939 1.17.2.1 chs
1940 1.17.2.1 chs /*
1941 1.17.2.1 chs * get all the pages in the range, change their blknos.
1942 1.17.2.1 chs * XXX access_type? advice?
1943 1.17.2.1 chs */
1944 1.17.2.1 chs
1945 1.17.2.1 chs bzero(pgs, sizeof pgs);
1946 1.17.2.1 chs
1947 1.17.2.1 chs again:
1948 1.17.2.1 chs simple_lock(&vp->v_uvm.u_obj.vmobjlock);
1949 1.17.2.1 chs rv = (vp->v_uvm.u_obj.pgops->pgo_get)(&vp->v_uvm.u_obj, off,
1950 1.17.2.1 chs pgs, &npages,
1951 1.17.2.1 chs 0, 0, 0, PGO_ALLPAGES);
1952 1.17.2.1 chs switch (rv) {
1953 1.17.2.1 chs case VM_PAGER_OK:
1954 1.17.2.1 chs break;
1955 1.17.2.1 chs
1956 1.17.2.1 chs #ifdef DIAGNOSTIC
1957 1.17.2.1 chs case VM_PAGER_PEND:
1958 1.17.2.1 chs 		panic("uvm_vnp_relocate: pgo_get got PENDing on non-async I/O");
1959 1.17.2.1 chs #endif
1960 1.17.2.1 chs
1961 1.17.2.1 chs case VM_PAGER_AGAIN:
1962 1.17.2.1 chs tsleep(&lbolt, PVM, "uvn_relocate", 0);
1963 1.17.2.1 chs goto again;
1964 1.17.2.1 chs
1965 1.17.2.1 chs default:
1966 1.17.2.1 chs return rv;
1967 1.17.2.1 chs }
1968 1.17.2.1 chs
1969 1.17.2.1 chs for (i = 0; i < npages; i++) {
1970 1.17.2.1 chs pg = pgs[i];
1971 1.17.2.1 chs
1972 1.17.2.1 chs #ifdef DIAGNOSTIC
1973 1.17.2.1 chs if (pg == NULL) {
1974 1.17.2.1 chs panic("uvm_vnp_relocate: NULL pg");
1975 1.17.2.1 chs }
1976 1.17.2.1 chs #endif
1977 1.17.2.1 chs
1978 1.17.2.1 chs pg->blkno = blkno;
1979 1.17.2.1 chs blkno += PAGE_SIZE >> DEV_BSHIFT;
1980 1.17.2.1 chs
1981 1.17.2.1 chs if (pg->flags & PG_WANTED) {
1982 1.17.2.1 chs wakeup(pg);
1983 1.17.2.1 chs }
1984 1.17.2.1 chs
1985 1.17.2.1 chs #ifdef DIAGNOSTIC
1986 1.17.2.1 chs if (pg->flags & PG_RELEASED) {
1987 1.17.2.1 chs panic("uvm_vnp_relocate: "
1988 1.17.2.1 chs "pgo_get gave us a RELEASED page");
1989 1.17.2.1 chs }
1990 1.17.2.1 chs #endif
1991 1.17.2.1 chs pg->flags &= ~PG_BUSY;
1992 1.17.2.1 chs UVM_PAGE_OWN(pg, NULL);
1993 1.17.2.1 chs }
1994 1.17.2.1 chs
1995 1.17.2.1 chs return 0;
1996 1.1 mrg }
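
/*
 * example of the blkno arithmetic above (assuming DEV_BSHIFT 9, i.e.
 * 512-byte device blocks, and PAGE_SIZE 0x1000): each page covers
 * PAGE_SIZE >> DEV_BSHIFT = 8 device blocks, so relocating len 0x3000
 * starting at blkno 64 assigns blknos 64, 72, and 80 to the three
 * pages.
 */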